* [dpdk-dev] [PATCH] ethdev: add namespace
@ 2021-06-29 13:46 Ferruh Yigit
  2021-06-29 15:02 ` Tyler Retzlaff
                   ` (2 more replies)
  0 siblings, 3 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-06-29 13:46 UTC (permalink / raw)
  To: Cristian Dumitrescu, Jasvinder Singh, Thomas Monjalon, Andrew Rybchenko
  Cc: Ferruh Yigit, dev

Add the 'RTE_ETH' namespace to all enums & macros in a backward-compatible
way. The macros for backward compatibility can be removed in the next LTS.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
We can merge this update in v21.11 and remove the backward-compatibility
macros in v22.11.
---
 app/test/test_kni.c       |   2 +-
 examples/qos_meter/main.c |   2 +-
 examples/qos_sched/init.c |   2 +-
 lib/ethdev/rte_ethdev.c   |   7 +-
 lib/ethdev/rte_ethdev.h   | 907 ++++++++++++++++++++++++--------------
 5 files changed, 574 insertions(+), 346 deletions(-)

diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index 96733554b6c4..40ab0d5c4ca4 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
 
 static const struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index 6e724f37835a..d0ff4ad2cbc9 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -65,7 +65,7 @@ static struct rte_eth_conf port_conf = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 1abe003fc6ae..74c4fffc0207 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -61,7 +61,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index c607eabb5b0c..a602176af4cb 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -98,9 +98,6 @@ static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
 
 static const struct {
@@ -126,14 +123,14 @@ static const struct {
 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
-	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
 };
 
 #undef RTE_RX_OFFLOAD_BIT2STR
 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
 
 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_TX_OFFLOAD_##_name, #_name }
+	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
 
 static const struct {
 	uint64_t offload;
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index faf3bd901d75..2248105a82ba 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -249,7 +249,7 @@ void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
  * field is not supported, its value is 0.
  * All byte-related statistics do not include Ethernet FCS regardless
  * of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
  */
 struct rte_eth_stats {
 	uint64_t ipackets;  /**< Total number of successfully received packets. */
@@ -279,42 +279,74 @@ struct rte_eth_stats {
 /**
  * Device supported speeds bitmap flags
  */
-#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
-#define ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
-#define ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
-#define ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
-#define ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
-#define ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
-#define ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
-#define ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
-#define ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG	RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED	RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD	RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M	RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD	RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M	RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
+#define ETH_LINK_SPEED_1G	RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G	RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
+#define ETH_LINK_SPEED_5G	RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
+#define ETH_LINK_SPEED_10G	RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
+#define ETH_LINK_SPEED_20G	RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
+#define ETH_LINK_SPEED_25G	RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
+#define ETH_LINK_SPEED_40G	RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
+#define ETH_LINK_SPEED_50G	RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
+#define ETH_LINK_SPEED_56G	RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G	RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G	RTE_ETH_LINK_SPEED_200G
 
 /**
  * Ethernet numeric link speeds in Mbps
  */
-#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
-#define ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
-#define ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
-#define ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
-#define ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
-#define ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
-#define ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
-#define ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
-#define ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
-#define ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
-#define ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE	RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
+#define ETH_SPEED_NUM_10M	RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M	RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
+#define ETH_SPEED_NUM_1G	RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G	RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
+#define ETH_SPEED_NUM_5G	RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
+#define ETH_SPEED_NUM_10G	RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
+#define ETH_SPEED_NUM_20G	RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
+#define ETH_SPEED_NUM_25G	RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
+#define ETH_SPEED_NUM_40G	RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
+#define ETH_SPEED_NUM_50G	RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
+#define ETH_SPEED_NUM_56G	RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G	RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G	RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN	RTE_ETH_SPEED_NUM_UNKNOWN
 
 /**
  * A structure used to retrieve link-level information of an Ethernet port.
@@ -328,12 +360,18 @@ struct rte_eth_link {
 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
 
 /* Utility constants */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX	RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX	RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN	RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP	RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED	RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG	RTE_ETH_LINK_AUTONEG
 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
 
 /**
@@ -349,9 +387,12 @@ struct rte_eth_thresh {
 /**
  *  Simple flags are used for rte_eth_conf.rxmode.mq_mode.
  */
-#define ETH_MQ_RX_RSS_FLAG  0x1
-#define ETH_MQ_RX_DCB_FLAG  0x2
-#define ETH_MQ_RX_VMDQ_FLAG 0x4
+#define RTE_ETH_MQ_RX_RSS_FLAG  0x1
+#define ETH_MQ_RX_RSS_FLAG	RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG  0x2
+#define ETH_MQ_RX_DCB_FLAG	RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4
+#define ETH_MQ_RX_VMDQ_FLAG	RTE_ETH_MQ_RX_VMDQ_FLAG
 
 /**
  *  A set of values to identify what method is to be used to route
@@ -359,50 +400,49 @@ struct rte_eth_thresh {
  */
 enum rte_eth_rx_mq_mode {
 	/** None of DCB,RSS or VMDQ mode */
-	ETH_MQ_RX_NONE = 0,
+	RTE_ETH_MQ_RX_NONE = 0,
 
 	/** For RX side, only RSS is on */
-	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
 	/** For RX side,only DCB is on. */
-	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Both DCB and RSS enable */
-	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 
 	/** Only VMDQ, no RSS nor DCB */
-	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** RSS mode with VMDQ */
-	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** Use VMDQ+DCB to route traffic to queues */
-	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Enable both VMDQ and DCB in VMDq */
-	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
-				 ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+				 RTE_ETH_MQ_RX_VMDQ_FLAG,
 };
 
-/**
- * for rx mq mode backward compatible
- */
-#define ETH_RSS                       ETH_MQ_RX_RSS
-#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX                    ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE		RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS		RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB		RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS	RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY	RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS	RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB	RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS	RTE_ETH_MQ_RX_VMDQ_DCB_RSS
 
 /**
  * A set of values to identify what method is to be used to transmit
  * packets using multi-TCs.
  */
 enum rte_eth_tx_mq_mode {
-	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
-	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
-	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
+	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
+	RTE_ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
+	RTE_ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
+	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
-
-/**
- * for tx mq mode backward compatible
- */
-#define ETH_DCB_NONE                ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX                  ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY RTE_ETH_MQ_TX_VMDQ_ONLY
 
 /**
  * A structure used to configure the RX features of an Ethernet port.
@@ -415,7 +455,7 @@ struct rte_eth_rxmode {
 	uint32_t max_lro_pkt_size;
 	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
 	/**
-	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -430,12 +470,17 @@ struct rte_eth_rxmode {
  * Note that single VLAN is treated the same as inner VLAN.
  */
 enum rte_vlan_type {
-	ETH_VLAN_TYPE_UNKNOWN = 0,
-	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
-	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
-	ETH_VLAN_TYPE_MAX,
+	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+	RTE_ETH_VLAN_TYPE_MAX,
 };
 
+#define ETH_VLAN_TYPE_UNKNOWN	RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER	RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER	RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX	RTE_ETH_VLAN_TYPE_MAX
+
 /**
  * A structure used to describe a vlan filter.
  * If the bit corresponding to a VID is set, such VID is on.
@@ -506,37 +551,68 @@ struct rte_eth_rss_conf {
  * Below macros are defined for RSS offload types, they can be used to
  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
  */
-#define ETH_RSS_IPV4               (1ULL << 2)
-#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
-#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
-#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
-#define ETH_RSS_IPV6               (1ULL << 8)
-#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
-#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
-#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
-#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
-#define ETH_RSS_IPV6_EX            (1ULL << 15)
-#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
-#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
-#define ETH_RSS_PORT               (1ULL << 18)
-#define ETH_RSS_VXLAN              (1ULL << 19)
-#define ETH_RSS_GENEVE             (1ULL << 20)
-#define ETH_RSS_NVGRE              (1ULL << 21)
-#define ETH_RSS_GTPU               (1ULL << 23)
-#define ETH_RSS_ETH                (1ULL << 24)
-#define ETH_RSS_S_VLAN             (1ULL << 25)
-#define ETH_RSS_C_VLAN             (1ULL << 26)
-#define ETH_RSS_ESP                (1ULL << 27)
-#define ETH_RSS_AH                 (1ULL << 28)
-#define ETH_RSS_L2TPV3             (1ULL << 29)
-#define ETH_RSS_PFCP               (1ULL << 30)
-#define ETH_RSS_PPPOE		   (1ULL << 31)
-#define ETH_RSS_ECPRI		   (1ULL << 32)
-#define ETH_RSS_MPLS		   (1ULL << 33)
+#define RTE_ETH_RSS_IPV4               (1ULL << 2)
+#define ETH_RSS_IPV4	RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4          (1ULL << 3)
+#define ETH_RSS_FRAG_IPV4	RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
+#define ETH_RSS_NONFRAG_IPV4_TCP	RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
+#define ETH_RSS_NONFRAG_IPV4_UDP	RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP	RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER	RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6               (1ULL << 8)
+#define ETH_RSS_IPV6	RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6          (1ULL << 9)
+#define ETH_RSS_FRAG_IPV6	RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
+#define ETH_RSS_NONFRAG_IPV6_TCP	RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
+#define ETH_RSS_NONFRAG_IPV6_UDP	RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP	RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER	RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD         (1ULL << 14)
+#define ETH_RSS_L2_PAYLOAD	RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX            (1ULL << 15)
+#define ETH_RSS_IPV6_EX	RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
+#define ETH_RSS_IPV6_TCP_EX	RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
+#define ETH_RSS_IPV6_UDP_EX	RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT               (1ULL << 18)
+#define ETH_RSS_PORT	RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN              (1ULL << 19)
+#define ETH_RSS_VXLAN	RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE             (1ULL << 20)
+#define ETH_RSS_GENEVE	RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE              (1ULL << 21)
+#define ETH_RSS_NVGRE	RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU               (1ULL << 23)
+#define ETH_RSS_GTPU	RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH                (1ULL << 24)
+#define ETH_RSS_ETH	RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN             (1ULL << 25)
+#define ETH_RSS_S_VLAN	RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN             (1ULL << 26)
+#define ETH_RSS_C_VLAN	RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP                (1ULL << 27)
+#define ETH_RSS_ESP	RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH                 (1ULL << 28)
+#define ETH_RSS_AH	RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3             (1ULL << 29)
+#define ETH_RSS_L2TPV3	RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP               (1ULL << 30)
+#define ETH_RSS_PFCP	RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE		   (1ULL << 31)
+#define ETH_RSS_PPPOE	RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI		   (1ULL << 32)
+#define ETH_RSS_ECPRI	RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS		   (1ULL << 33)
+#define ETH_RSS_MPLS	RTE_ETH_RSS_MPLS
 
 /*
  * We use the following macros to combine with above ETH_RSS_* for
@@ -547,12 +623,18 @@ struct rte_eth_rss_conf {
  * the same level are used simultaneously, it is the same case as none of
  * them are added.
  */
-#define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
-#define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
-#define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
-#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
-#define ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
-#define ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define RTE_ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
+#define ETH_RSS_L3_SRC_ONLY	RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY        (1ULL << 62)
+#define ETH_RSS_L3_DST_ONLY	RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
+#define ETH_RSS_L4_SRC_ONLY	RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY        (1ULL << 60)
+#define ETH_RSS_L4_DST_ONLY	RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
+#define ETH_RSS_L2_SRC_ONLY	RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define ETH_RSS_L2_DST_ONLY	RTE_ETH_RSS_L2_DST_ONLY
 
 /*
  * Only select IPV6 address prefix as RSS input set according to
@@ -580,22 +662,27 @@ struct rte_eth_rss_conf {
  * It basically stands for the innermost encapsulation level RSS
  * can be performed on according to PMD and device capabilities.
  */
-#define ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT	RTE_ETH_RSS_LEVEL_PMD_DEFAULT
 
 /**
  * level 1, requests RSS to be performed on the outermost packet
  * encapsulation level.
  */
-#define ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST	RTE_ETH_RSS_LEVEL_OUTERMOST
 
 /**
  * level 2, requests RSS to be performed on the specified inner packet
  * encapsulation level, from outermost to innermost (lower to higher values).
  */
-#define ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST	RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK	RTE_ETH_RSS_LEVEL_MASK
 
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf)	RTE_ETH_RSS_LEVEL(rss_hf)
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
@@ -619,213 +706,277 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)
 	return rss_hf;
 }
 
-#define ETH_RSS_IPV6_PRE32 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32	RTE_ETH_RSS_IPV6_PRE32
 
-#define ETH_RSS_IPV6_PRE40 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40	RTE_ETH_RSS_IPV6_PRE40
 
-#define ETH_RSS_IPV6_PRE48 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48	RTE_ETH_RSS_IPV6_PRE48
 
-#define ETH_RSS_IPV6_PRE56 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56	RTE_ETH_RSS_IPV6_PRE56
 
-#define ETH_RSS_IPV6_PRE64 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64	RTE_ETH_RSS_IPV6_PRE64
 
-#define ETH_RSS_IPV6_PRE96 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96	RTE_ETH_RSS_IPV6_PRE96
 
-#define ETH_RSS_IPV6_PRE32_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP	RTE_ETH_RSS_IPV6_PRE32_UDP
 
-#define ETH_RSS_IPV6_PRE40_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP	RTE_ETH_RSS_IPV6_PRE40_UDP
 
-#define ETH_RSS_IPV6_PRE48_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP	RTE_ETH_RSS_IPV6_PRE48_UDP
 
-#define ETH_RSS_IPV6_PRE56_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP	RTE_ETH_RSS_IPV6_PRE56_UDP
 
-#define ETH_RSS_IPV6_PRE64_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP	RTE_ETH_RSS_IPV6_PRE64_UDP
 
-#define ETH_RSS_IPV6_PRE96_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP	RTE_ETH_RSS_IPV6_PRE96_UDP
 
-#define ETH_RSS_IPV6_PRE32_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP	RTE_ETH_RSS_IPV6_PRE32_TCP
 
-#define ETH_RSS_IPV6_PRE40_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP	RTE_ETH_RSS_IPV6_PRE40_TCP
 
-#define ETH_RSS_IPV6_PRE48_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP	RTE_ETH_RSS_IPV6_PRE48_TCP
 
-#define ETH_RSS_IPV6_PRE56_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP	RTE_ETH_RSS_IPV6_PRE56_TCP
 
-#define ETH_RSS_IPV6_PRE64_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP	RTE_ETH_RSS_IPV6_PRE64_TCP
 
-#define ETH_RSS_IPV6_PRE96_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP	RTE_ETH_RSS_IPV6_PRE96_TCP
 
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP	RTE_ETH_RSS_IPV6_PRE32_SCTP
 
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP	RTE_ETH_RSS_IPV6_PRE40_SCTP
 
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP	RTE_ETH_RSS_IPV6_PRE48_SCTP
 
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP	RTE_ETH_RSS_IPV6_PRE56_SCTP
 
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP	RTE_ETH_RSS_IPV6_PRE64_SCTP
 
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
-	ETH_RSS_VXLAN  | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
-	ETH_RSS_S_VLAN  | \
-	ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP	RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP	RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP	RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP	RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP	RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+	RTE_ETH_RSS_VXLAN  | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL	RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+	RTE_ETH_RSS_S_VLAN  | \
+	RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN	RTE_ETH_RSS_VLAN
 
 /**< Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX | \
-	ETH_RSS_PORT  | \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE | \
-	ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX | \
+	RTE_ETH_RSS_PORT  | \
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE | \
+	RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK	RTE_ETH_RSS_PROTO_MASK
 
 /*
  * Definitions used for redirection table entry size.
  * Some RSS RETA sizes may not be supported by some drivers, check the
  * documentation or the description of relevant functions for more details.
  */
-#define ETH_RSS_RETA_SIZE_64  64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE   64
+#define RTE_ETH_RSS_RETA_SIZE_64  64
+#define ETH_RSS_RETA_SIZE_64	RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128	RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256	RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512	RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE   64
+#define RTE_RETA_GROUP_SIZE	RTE_ETH_RETA_GROUP_SIZE
 
 /* Definitions used for VMDQ and DCB functionality */
-#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
-#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS	RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES	RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES	RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES	RTE_ETH_DCB_NUM_QUEUES
 
 /* DCB capability defines */
-#define ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PG_SUPPORT	RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT	RTE_ETH_DCB_PFC_SUPPORT
 
 /* Definitions used for VLAN Offload functionality */
-#define ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD	RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD	RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD	RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD	RTE_ETH_QINQ_STRIP_OFFLOAD
 
 /* Definitions used for mask VLAN setting */
-#define ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
-#define ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
-#define ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
-#define ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
-#define ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
+#define ETH_VLAN_STRIP_MASK	RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
+#define ETH_VLAN_FILTER_MASK	RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
+#define ETH_VLAN_EXTEND_MASK	RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
+#define ETH_QINQ_STRIP_MASK	RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define ETH_VLAN_ID_MAX	RTE_ETH_VLAN_ID_MAX
 
 /* Definitions used for receive MAC address   */
-#define ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define ETH_NUM_RECEIVE_MAC_ADDR	RTE_ETH_NUM_RECEIVE_MAC_ADDR
 
 /* Definitions used for unicast hash  */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY	RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
 
 /* Definitions used for VMDQ pool rx mode setting */
-#define ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG	RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
+#define ETH_VMDQ_ACCEPT_HASH_MC	RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC	RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST	RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST	RTE_ETH_VMDQ_ACCEPT_MULTICAST
 
 /** Maximum nb. of vlan per mirror rule */
-#define ETH_MIRROR_MAX_VLANS       64
+#define RTE_ETH_MIRROR_MAX_VLANS       64
+#define ETH_MIRROR_MAX_VLANS	RTE_ETH_MIRROR_MAX_VLANS
 
-#define ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
-#define ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
-#define ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
-#define ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
-#define ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_UP	RTE_ETH_MIRROR_VIRTUAL_POOL_UP
+#define RTE_ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
+#define ETH_MIRROR_UPLINK_PORT	RTE_ETH_MIRROR_UPLINK_PORT
+#define RTE_ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
+#define ETH_MIRROR_DOWNLINK_PORT	RTE_ETH_MIRROR_DOWNLINK_PORT
+#define RTE_ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
+#define ETH_MIRROR_VLAN	RTE_ETH_MIRROR_VLAN
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_DOWN	RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
 
 /**
  * A structure used to configure VLAN traffic mirror of an Ethernet port.
@@ -865,20 +1016,26 @@ struct rte_eth_rss_reta_entry64 {
  * in DCB configurations
  */
 enum rte_eth_nb_tcs {
-	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
-	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
+	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
 };
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
 
 /**
  * This enum indicates the possible number of queue pools
  * in VMDQ configurations.
  */
 enum rte_eth_nb_pools {
-	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
-	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
-	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
-	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
+	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
+	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
+	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
+	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
 };
+#define ETH_8_POOLS	RTE_ETH_8_POOLS
+#define ETH_16_POOLS	RTE_ETH_16_POOLS
+#define ETH_32_POOLS	RTE_ETH_32_POOLS
+#define ETH_64_POOLS	RTE_ETH_64_POOLS
 
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
@@ -964,7 +1121,7 @@ struct rte_eth_vmdq_rx_conf {
 struct rte_eth_txmode {
 	enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
 	/**
-	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -1048,7 +1205,7 @@ struct rte_eth_rxconf {
 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */
 	/**
-	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1077,7 +1234,7 @@ struct rte_eth_txconf {
 
 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	/**
-	 * Per-queue Tx offloads to be set  using DEV_TX_OFFLOAD_* flags.
+	 * Per-queue Tx offloads to be set  using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1188,12 +1345,17 @@ struct rte_eth_desc_lim {
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
-	RTE_FC_NONE = 0, /**< Disable flow control. */
-	RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
-	RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
-	RTE_FC_FULL      /**< Enable flow control on both side. */
+	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+	RTE_ETH_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
+	RTE_ETH_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
+	RTE_ETH_FC_FULL      /**< Enable flow control on both side. */
 };
 
+#define RTE_FC_NONE	RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE	RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE	RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL	RTE_ETH_FC_FULL
+
 /**
  * A structure used to configure Ethernet flow control parameter.
  * These parameters will be configured into the register of the NIC.
@@ -1224,18 +1386,29 @@ struct rte_eth_pfc_conf {
  * @see rte_eth_udp_tunnel
  */
 enum rte_eth_tunnel_type {
-	RTE_TUNNEL_TYPE_NONE = 0,
-	RTE_TUNNEL_TYPE_VXLAN,
-	RTE_TUNNEL_TYPE_GENEVE,
-	RTE_TUNNEL_TYPE_TEREDO,
-	RTE_TUNNEL_TYPE_NVGRE,
-	RTE_TUNNEL_TYPE_IP_IN_GRE,
-	RTE_L2_TUNNEL_TYPE_E_TAG,
-	RTE_TUNNEL_TYPE_VXLAN_GPE,
-	RTE_TUNNEL_TYPE_ECPRI,
-	RTE_TUNNEL_TYPE_MAX,
+	RTE_ETH_TUNNEL_TYPE_NONE = 0,
+	RTE_ETH_TUNNEL_TYPE_VXLAN,
+	RTE_ETH_TUNNEL_TYPE_GENEVE,
+	RTE_ETH_TUNNEL_TYPE_TEREDO,
+	RTE_ETH_TUNNEL_TYPE_NVGRE,
+	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_ETH_TUNNEL_TYPE_ECPRI,
+	RTE_ETH_TUNNEL_TYPE_MAX,
 };
 
+#define RTE_TUNNEL_TYPE_NONE		RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN		RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE		RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO		RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE		RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG	RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI		RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX		RTE_ETH_TUNNEL_TYPE_MAX
+
 /* Deprecated API file for rte_eth_dev_filter_* functions */
 #include "rte_eth_ctrl.h"
 
@@ -1243,11 +1416,16 @@ enum rte_eth_tunnel_type {
  *  Memory space that can be configured to store Flow Director filters
  *  in the board memory.
  */
-enum rte_fdir_pballoc_type {
-	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
-	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
-	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+	RTE_ETH_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+	RTE_ETH_FDIR_PBALLOC_128K,     /**< 128k. */
+	RTE_ETH_FDIR_PBALLOC_256K,     /**< 256k. */
 };
+#define rte_fdir_pballoc_type	rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K	RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K	RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K	RTE_ETH_FDIR_PBALLOC_256K
 
 /**
  *  Select report mode of FDIR hash information in RX descriptors.
@@ -1264,9 +1442,9 @@ enum rte_fdir_status_mode {
  *
  * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
  */
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
 	enum rte_fdir_mode mode; /**< Flow Director mode. */
-	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+	enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
 	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
 	/** RX queue of packets matching a "drop" filter in perfect mode. */
 	uint8_t drop_queue;
@@ -1275,6 +1453,8 @@ struct rte_fdir_conf {
 	/**< Flex payload configuration. */
 };
 
+#define rte_fdir_conf rte_eth_fdir_conf
+
 /**
  * UDP tunneling configuration.
  *
@@ -1292,7 +1472,7 @@ struct rte_eth_udp_tunnel {
 /**
  * A structure used to enable/disable specific device interrupts.
  */
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint32_t lsc:1;
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
@@ -1301,6 +1481,8 @@ struct rte_intr_conf {
 	uint32_t rmv:1;
 };
 
+#define rte_intr_conf rte_eth_intr_conf
+
 /**
  * A structure used to configure an Ethernet port.
  * Depending upon the RX multi-queue mode, extra advanced
@@ -1348,39 +1530,60 @@ struct rte_eth_conf {
 /**
  * RX offload capabilities of a device.
  */
-#define DEV_RX_OFFLOAD_VLAN_STRIP  0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT	0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER	0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND	0x00000400
-#define DEV_RX_OFFLOAD_JUMBO_FRAME	0x00000800
-#define DEV_RX_OFFLOAD_SCATTER		0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP  0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP	RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM	RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO     0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO	RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP  0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP	RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP	RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT	0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT	RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER	0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER	RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND	0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	0x00000800
+#define DEV_RX_OFFLOAD_JUMBO_FRAME	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME
+#define RTE_ETH_RX_OFFLOAD_SCATTER	0x00002000
+#define DEV_RX_OFFLOAD_SCATTER		RTE_ETH_RX_OFFLOAD_SCATTER
 /**
  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_RX_OFFLOAD_TIMESTAMP	0x00004000
-#define DEV_RX_OFFLOAD_SECURITY         0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC		0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM	0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH		0x00080000
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP	0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP	RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY     0x00008000
+#define DEV_RX_OFFLOAD_SECURITY		RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC	0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC		RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH	0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH	RTE_ETH_RX_OFFLOAD_RSS_HASH
 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
 
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				 DEV_RX_OFFLOAD_UDP_CKSUM | \
-				 DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			     DEV_RX_OFFLOAD_VLAN_FILTER | \
-			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-			     DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM	RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN	RTE_ETH_RX_OFFLOAD_VLAN
 
 /*
  * If new Rx offload capabilities are defined, they also must be
@@ -1390,52 +1593,74 @@ struct rte_eth_conf {
 /**
  * TX offload capabilities of a device.
  */
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM  0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO     0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO     0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
-#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT 0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT	RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM	RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO     0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO		RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO     0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO		RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT 0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT	RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT	RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
 /**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
  * tx queue without SW lock.
  */
-#define DEV_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS	RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 /**< Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 /**< Device supports optimization for fast release of mbufs.
  *   When set application must guarantee that per-queue all mbufs comes from
  *   the same mempool and has refcnt = 1.
  */
-#define DEV_TX_OFFLOAD_SECURITY         0x00020000
+#define RTE_ETH_TX_OFFLOAD_SECURITY         0x00020000
+#define DEV_TX_OFFLOAD_SECURITY	RTE_ETH_TX_OFFLOAD_SECURITY
 /**
  * Device supports generic UDP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO	RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
 /**
  * Device supports generic IP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
 /** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
 /**
  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP	RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
@@ -1672,8 +1897,10 @@ struct rte_eth_xstat_name {
 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
 };
 
-#define ETH_DCB_NUM_TCS    8
-#define ETH_MAX_VMDQ_POOL  64
+#define RTE_ETH_DCB_NUM_TCS    8
+#define ETH_DCB_NUM_TCS	RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL  64
+#define ETH_MAX_VMDQ_POOL	RTE_ETH_MAX_VMDQ_POOL
 
 /**
  * A structure used to get the information of queue and
@@ -1749,13 +1976,17 @@ struct rte_eth_fec_capa {
  */
 
 /**< l2 tunnel enable mask */
-#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define RTE_ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define ETH_L2_TUNNEL_ENABLE_MASK	RTE_ETH_L2_TUNNEL_ENABLE_MASK
 /**< l2 tunnel insertion mask */
-#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define RTE_ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define ETH_L2_TUNNEL_INSERTION_MASK	RTE_ETH_L2_TUNNEL_INSERTION_MASK
 /**< l2 tunnel stripping mask */
-#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define RTE_ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define ETH_L2_TUNNEL_STRIPPING_MASK	RTE_ETH_L2_TUNNEL_STRIPPING_MASK
 /**< l2 tunnel forwarding mask */
-#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define RTE_ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define ETH_L2_TUNNEL_FORWARDING_MASK	RTE_ETH_L2_TUNNEL_FORWARDING_MASK
 
 /**
  * Function type used for RX packet processing packet callbacks.
@@ -2075,7 +2306,7 @@ uint16_t rte_eth_dev_count_total(void);
 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 
 /**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2085,7 +2316,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
 
 /**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2179,7 +2410,7 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
- *   the DEV_RX_OFFLOAD_* flags.
+ *   the RTE_ETH_RX_OFFLOAD_* flags.
  *   If an offloading set in rx_conf->offloads
  *   hasn't been set in the input argument eth_conf->rxmode.offloads
  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
@@ -5224,7 +5455,7 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
  * of those packets whose transmission was effectively completed.
  *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
  * invoke this function concurrently on the same tx queue without SW lock.
  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
  *
-- 
2.31.1
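
For illustration, a minimal application-side sketch of what the renaming
above means in practice (hypothetical port_id and configuration; during the
compatibility window the old DEV_TX_OFFLOAD_* aliases expand to the very
same RTE_ETH_TX_OFFLOAD_* bits, so either spelling tests the same capability):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = {
		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
	};

	rte_eth_dev_info_get(port_id, &dev_info);

	/* New name and legacy alias test the same capability bit. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	rte_eth_dev_configure(port_id, 1, 1, &conf);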


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH] ethdev: add namespace
  2021-06-29 13:46 [dpdk-dev] [PATCH] ethdev: add namespace Ferruh Yigit
@ 2021-06-29 15:02 ` Tyler Retzlaff
  2021-06-30  6:29 ` David Marchand
  2021-08-27  1:19 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
  2 siblings, 0 replies; 32+ messages in thread
From: Tyler Retzlaff @ 2021-06-29 15:02 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: Cristian Dumitrescu, Jasvinder Singh, Thomas Monjalon,
	Andrew Rybchenko, dev

On Tue, Jun 29, 2021 at 02:46:32PM +0100, Ferruh Yigit wrote:
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> way. The macros for backward compatibility can be removed in next LTS.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> ---
Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH] ethdev: add namespace
  2021-06-29 13:46 [dpdk-dev] [PATCH] ethdev: add namespace Ferruh Yigit
  2021-06-29 15:02 ` Tyler Retzlaff
@ 2021-06-30  6:29 ` David Marchand
  2021-06-30  9:08   ` Ferruh Yigit
  2021-08-27  1:19 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
  2 siblings, 1 reply; 32+ messages in thread
From: David Marchand @ 2021-06-30  6:29 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: Cristian Dumitrescu, Jasvinder Singh, Thomas Monjalon,
	Andrew Rybchenko, dev, Dodji Seketeli

Hello Ferruh,

On Tue, Jun 29, 2021 at 3:46 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> way. The macros for backward compatibility can be removed in next LTS.
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>

- I did not do a full check, but I noticed that at least the ETH_RSS compat
macro is removed.
Is this intentional?


- libabigail is not happy because of the enum name changes.
Example:

  [C] 'function int rte_eth_dev_configure(uint16_t, uint16_t,
uint16_t, const rte_eth_conf*)' at rte_ethdev.c:1326:1 has some
indirect sub-type changes:
    parameter 4 of type 'const rte_eth_conf*' has sub-type changes:
      in pointed to type 'const rte_eth_conf':
        in unqualified underlying type 'struct rte_eth_conf' at
rte_ethdev.h:1491:1:
          type size hasn't changed
          5 data member changes (1 filtered):
            type of 'rte_eth_rxmode rxmode' changed:
              type size hasn't changed
              1 data member change:
                type of 'rte_eth_rx_mq_mode mq_mode' changed:
                  type size hasn't changed
                  8 enumerator deletions:
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_NONE' value '0'
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_RSS' value '1'
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_DCB' value '2'
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_DCB_RSS' value '3'
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_ONLY' value '4'
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_RSS' value '5'
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_DCB' value '6'
                    'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_DCB_RSS' value '7'
                  8 enumerator insertions:
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_NONE' value '0'
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_RSS' value '1'
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_DCB' value '2'
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_DCB_RSS' value '3'
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_ONLY' value '4'
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_RSS' value '5'
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_DCB' value '6'
                    'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_DCB_RSS' value '7'
[snip]


I guess libabigail is lost because the symbol
rte_eth_rx_mq_mode::ETH_MQ_RX_NONE simply disappeared (because we used
a macro to wrap the old name to the new one).
Maybe we could go the other way: leave the current enums defined as-is
and put in place wrappers for the new names pointing at the old names.
The rest of the code in DPDK would use the new names only.
This comment applies if we want to merge this change in 21.08 and/or
we want to backport this change.

This won't be a problem if we merge this patch in 21.11.
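
For illustration, a sketch of the two directions on a single enumerator
(values elided; only one variant would actually exist in the header):

	/* Direction taken in this patch: rename the enumerator,
	 * alias the old name. */
	enum rte_eth_rx_mq_mode { RTE_ETH_MQ_RX_NONE = 0, /* ... */ };
	#define ETH_MQ_RX_NONE RTE_ETH_MQ_RX_NONE

	/* Alternative that keeps libabigail quiet: keep the enumerator,
	 * alias the new name. */
	enum rte_eth_rx_mq_mode { ETH_MQ_RX_NONE = 0, /* ... */ };
	#define RTE_ETH_MQ_RX_NONE ETH_MQ_RX_NONE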


> ---
> We can get the update on v21.11 and remove backward compatibility macros
> on v22.11.



-- 
David Marchand


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH] ethdev: add namespace
  2021-06-30  6:29 ` David Marchand
@ 2021-06-30  9:08   ` Ferruh Yigit
  0 siblings, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-06-30  9:08 UTC (permalink / raw)
  To: David Marchand
  Cc: Cristian Dumitrescu, Jasvinder Singh, Thomas Monjalon,
	Andrew Rybchenko, dev, Dodji Seketeli, Qi Zhang,
	Raslan Darawsheh, Ajit Khaparde, jerinj

On 6/30/2021 7:29 AM, David Marchand wrote:
> Hello Ferruh,
> 
> On Tue, Jun 29, 2021 at 3:46 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>>
>> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
>> way. The macros for backward compatibility can be removed in next LTS.
>>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> 
> - I did not do a full check, but I noticed that at least the ETH_RSS compat
> macro is removed.
> Is this intentional?
> 

Yes, two groups of macros were remaining from 2013 for backward compatibility;
with this patch there would be two layers of redirection, so I removed the old
ones. The only changes in the examples/apps are due to these removed macros,
since the rest are all backward compatible.

Removed ones:
/**
 * for rx mq mode backward compatible
 */
#define ETH_RSS                       ETH_MQ_RX_RSS
#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX                    ETH_MQ_RX_DCB

/**
 * for tx mq mode backward compatible
 */
#define ETH_DCB_NONE                ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX                  ETH_MQ_TX_DCB
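
For illustration, keeping ETH_RSS on top of this patch would have left a
chain like the sketch below, where the 2013 alias resolves only through the
new compatibility macro (enumerator body elided):

	enum rte_eth_rx_mq_mode { /* ... */ RTE_ETH_MQ_RX_RSS, /* ... */ };
	#define ETH_MQ_RX_RSS RTE_ETH_MQ_RX_RSS  /* compat macro from this patch */
	#define ETH_RSS       ETH_MQ_RX_RSS      /* 2013 alias: now a second hop */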



> 
> - libabigail is not happy because of the enum name changes.
> Example:
> 
>   [C] 'function int rte_eth_dev_configure(uint16_t, uint16_t,
> uint16_t, const rte_eth_conf*)' at rte_ethdev.c:1326:1 has some
> indirect sub-type changes:
>     parameter 4 of type 'const rte_eth_conf*' has sub-type changes:
>       in pointed to type 'const rte_eth_conf':
>         in unqualified underlying type 'struct rte_eth_conf' at
> rte_ethdev.h:1491:1:
>           type size hasn't changed
>           5 data member changes (1 filtered):
>             type of 'rte_eth_rxmode rxmode' changed:
>               type size hasn't changed
>               1 data member change:
>                 type of 'rte_eth_rx_mq_mode mq_mode' changed:
>                   type size hasn't changed
>                   8 enumerator deletions:
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_NONE' value '0'
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_RSS' value '1'
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_DCB' value '2'
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_DCB_RSS' value '3'
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_ONLY' value '4'
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_RSS' value '5'
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_DCB' value '6'
>                     'rte_eth_rx_mq_mode::ETH_MQ_RX_VMDQ_DCB_RSS' value '7'
>                   8 enumerator insertions:
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_NONE' value '0'
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_RSS' value '1'
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_DCB' value '2'
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_DCB_RSS' value '3'
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_ONLY' value '4'
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_RSS' value '5'
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_DCB' value '6'
>                     'rte_eth_rx_mq_mode::RTE_ETH_MQ_RX_VMDQ_DCB_RSS' value '7'
> [snip]
> 
> 
> I guess libabigail is lost because the symbol
> rte_eth_rx_mq_mode::ETH_MQ_RX_NONE simply disappeared (because we used
> a macro to wrap to the new name).

Yes.

> Maybe we could go the other way: leave the current enums defined as-is
> and put in place wrappers for the new names pointing at the old names.
> The rest of the code in DPDK would use the new names only.

It works to prevent the libabigail warnings, but I think it can be confusing
for users when figuring out which names are the correct ones to use.

> This comment applies if we want to merge this change in 21.08 and/or
> we want to backport this change.
> 
> This won't be a problem if we merge this patch in 21.11.
> 

OK to have it in v21.11. In that case I assume all internal components also
need to be updated to use the new macros/enums during the v21.11 release.

Let me send a deprecation notice for it.

Meanwhile, we can use this patch to discuss the prefix/namespace: whether it
should be 'RTE_ETH_' or 'RTE_ETH_DEV_', or a mix of both (as done now).

And if a mixed prefix is used, we can try to define when to include the DEV_
part and when not.

> 
>> ---
>> We can get the update on v21.11 and remove backward compatibility macros
>> on v22.11.
> 
> 
> 


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-06-29 13:46 [dpdk-dev] [PATCH] ethdev: add namespace Ferruh Yigit
  2021-06-29 15:02 ` Tyler Retzlaff
  2021-06-30  6:29 ` David Marchand
@ 2021-08-27  1:19 ` Ferruh Yigit
  2021-08-27  7:59   ` Andrew Rybchenko
                     ` (2 more replies)
  2 siblings, 3 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-08-27  1:19 UTC (permalink / raw)
  To: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko,
	Keith Wiles, Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal,
	Declan Doherty, Ray Kinsella, Radu Nicolau, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Ciara Loftus,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang
  Cc: Ferruh Yigit, dev, Tyler Retzlaff

Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
way. The macros for backward compatibility can be removed in next LTS.

Internal components switched to new enum & macro names.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
v2:
* Updated internal components
* Removed deprecation notice
---
 app/proc-info/main.c                          |   8 +-
 app/test-eventdev/test_perf_common.c          |   4 +-
 app/test-eventdev/test_pipeline_common.c      |  12 +-
 app/test-flow-perf/config.h                   |   2 +-
 app/test-pipeline/init.c                      |   8 +-
 app/test-pmd/cmdline.c                        | 290 +++---
 app/test-pmd/config.c                         | 198 ++--
 app/test-pmd/csumonly.c                       |  28 +-
 app/test-pmd/flowgen.c                        |   6 +-
 app/test-pmd/macfwd.c                         |   6 +-
 app/test-pmd/macswap_common.h                 |   6 +-
 app/test-pmd/parameters.c                     |  54 +-
 app/test-pmd/testpmd.c                        |  60 +-
 app/test-pmd/testpmd.h                        |   2 +-
 app/test-pmd/txonly.c                         |   6 +-
 app/test/test_ethdev_link.c                   |  68 +-
 app/test/test_event_eth_rx_adapter.c          |   4 +-
 app/test/test_kni.c                           |   2 +-
 app/test/test_link_bonding.c                  |   4 +-
 app/test/test_link_bonding_mode4.c            |   4 +-
 app/test/test_link_bonding_rssconf.c          |  10 +-
 app/test/test_pmd_perf.c                      |  12 +-
 app/test/virtual_pmd.c                        |  10 +-
 doc/guides/eventdevs/cnxk.rst                 |   2 +-
 doc/guides/eventdevs/octeontx2.rst            |   2 +-
 doc/guides/howto/debug_troubleshoot.rst       |   2 +-
 doc/guides/nics/bnxt.rst                      |  26 +-
 doc/guides/nics/enic.rst                      |   2 +-
 doc/guides/nics/features.rst                  | 116 +--
 doc/guides/nics/fm10k.rst                     |   6 +-
 doc/guides/nics/intel_vf.rst                  |  10 +-
 doc/guides/nics/ixgbe.rst                     |  12 +-
 doc/guides/nics/mlx5.rst                      |   4 +-
 doc/guides/nics/tap.rst                       |   2 +-
 .../generic_segmentation_offload_lib.rst      |   8 +-
 doc/guides/prog_guide/mbuf_lib.rst            |  18 +-
 doc/guides/prog_guide/poll_mode_drv.rst       |   8 +-
 doc/guides/prog_guide/rte_flow.rst            |  34 +-
 doc/guides/prog_guide/rte_security.rst        |   2 +-
 doc/guides/rel_notes/deprecation.rst          |  12 +-
 doc/guides/sample_app_ug/ipsec_secgw.rst      |   4 +-
 doc/guides/testpmd_app_ug/run_app.rst         |   2 +-
 drivers/bus/dpaa/include/process.h            |  16 +-
 drivers/common/cnxk/roc_npc.h                 |   2 +-
 drivers/net/af_packet/rte_eth_af_packet.c     |  16 +-
 drivers/net/af_xdp/rte_eth_af_xdp.c           |  12 +-
 drivers/net/ark/ark_ethdev.c                  |  16 +-
 drivers/net/atlantic/atl_ethdev.c             |  90 +-
 drivers/net/atlantic/atl_ethdev.h             |  18 +-
 drivers/net/atlantic/atl_rxtx.c               |   6 +-
 drivers/net/avp/avp_ethdev.c                  |  26 +-
 drivers/net/axgbe/axgbe_dev.c                 |   6 +-
 drivers/net/axgbe/axgbe_ethdev.c              | 102 +-
 drivers/net/axgbe/axgbe_ethdev.h              |  12 +-
 drivers/net/axgbe/axgbe_mdio.c                |   2 +-
 drivers/net/axgbe/axgbe_rxtx.c                |   6 +-
 drivers/net/bnx2x/bnx2x_ethdev.c              |  16 +-
 drivers/net/bnxt/bnxt.h                       |  68 +-
 drivers/net/bnxt/bnxt_ethdev.c                | 170 ++--
 drivers/net/bnxt/bnxt_flow.c                  |   4 +-
 drivers/net/bnxt/bnxt_hwrm.c                  | 112 +--
 drivers/net/bnxt/bnxt_reps.c                  |   2 +-
 drivers/net/bnxt/bnxt_ring.c                  |   4 +-
 drivers/net/bnxt/bnxt_rxq.c                   |  28 +-
 drivers/net/bnxt/bnxt_rxr.c                   |   4 +-
 drivers/net/bnxt/bnxt_rxtx_vec_avx2.c         |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_common.h       |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c         |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c          |   2 +-
 drivers/net/bnxt/bnxt_txr.c                   |   4 +-
 drivers/net/bnxt/bnxt_vnic.c                  |  30 +-
 drivers/net/bnxt/rte_pmd_bnxt.c               |   8 +-
 drivers/net/bonding/eth_bond_private.h        |   2 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c     |  16 +-
 drivers/net/bonding/rte_eth_bond_api.c        |   6 +-
 drivers/net/bonding/rte_eth_bond_pmd.c        |  42 +-
 drivers/net/cnxk/cn10k_ethdev.c               |  38 +-
 drivers/net/cnxk/cn10k_rx.c                   |   4 +-
 drivers/net/cnxk/cn10k_tx.c                   |   4 +-
 drivers/net/cnxk/cn9k_ethdev.c                |  56 +-
 drivers/net/cnxk/cn9k_rx.c                    |   4 +-
 drivers/net/cnxk/cn9k_tx.c                    |   4 +-
 drivers/net/cnxk/cnxk_ethdev.c                |  84 +-
 drivers/net/cnxk/cnxk_ethdev.h                |  49 +-
 drivers/net/cnxk/cnxk_ethdev_devargs.c        |   6 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c            | 104 +-
 drivers/net/cnxk/cnxk_link.c                  |  14 +-
 drivers/net/cnxk/cnxk_ptp.c                   |   4 +-
 drivers/net/cnxk/cnxk_rte_flow.c              |   2 +-
 drivers/net/cxgbe/cxgbe.h                     |  48 +-
 drivers/net/cxgbe/cxgbe_ethdev.c              |  42 +-
 drivers/net/cxgbe/cxgbe_main.c                |  12 +-
 drivers/net/cxgbe/sge.c                       |   2 +-
 drivers/net/dpaa/dpaa_ethdev.c                | 190 ++--
 drivers/net/dpaa/dpaa_ethdev.h                |  10 +-
 drivers/net/dpaa/dpaa_flow.c                  |  32 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |  34 +-
 drivers/net/dpaa2/dpaa2_ethdev.c              | 148 +--
 drivers/net/dpaa2/dpaa2_ethdev.h              |  12 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   8 +-
 drivers/net/e1000/e1000_ethdev.h              |  18 +-
 drivers/net/e1000/em_ethdev.c                 |  68 +-
 drivers/net/e1000/em_rxtx.c                   |  48 +-
 drivers/net/e1000/igb_ethdev.c                | 158 +--
 drivers/net/e1000/igb_pf.c                    |   2 +-
 drivers/net/e1000/igb_rxtx.c                  | 116 +--
 drivers/net/ena/ena_ethdev.c                  |  70 +-
 drivers/net/ena/ena_ethdev.h                  |   4 +-
 drivers/net/ena/ena_rss.c                     |  66 +-
 drivers/net/enetc/enetc_ethdev.c              |  38 +-
 drivers/net/enic/enic_ethdev.c                |  80 +-
 drivers/net/enic/enic_main.c                  |  40 +-
 drivers/net/enic/enic_res.c                   |  52 +-
 drivers/net/failsafe/failsafe.c               |   8 +-
 drivers/net/failsafe/failsafe_intr.c          |   4 +-
 drivers/net/failsafe/failsafe_ops.c           |  82 +-
 drivers/net/fm10k/fm10k.h                     |   4 +-
 drivers/net/fm10k/fm10k_ethdev.c              | 140 +--
 drivers/net/fm10k/fm10k_rxtx_vec.c            |   6 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c      |  22 +-
 drivers/net/hinic/hinic_pmd_ethdev.c          | 134 +--
 drivers/net/hinic/hinic_pmd_rx.c              |  36 +-
 drivers/net/hinic/hinic_pmd_rx.h              |  22 +-
 drivers/net/hns3/hns3_dcb.c                   |  14 +-
 drivers/net/hns3/hns3_ethdev.c                | 360 +++----
 drivers/net/hns3/hns3_ethdev.h                |  12 +-
 drivers/net/hns3/hns3_ethdev_vf.c             | 108 +--
 drivers/net/hns3/hns3_flow.c                  |   6 +-
 drivers/net/hns3/hns3_ptp.c                   |   2 +-
 drivers/net/hns3/hns3_rss.c                   | 100 +-
 drivers/net/hns3/hns3_rss.h                   |  28 +-
 drivers/net/hns3/hns3_rxtx.c                  |  30 +-
 drivers/net/hns3/hns3_rxtx.h                  |   2 +-
 drivers/net/hns3/hns3_rxtx_vec.c              |  10 +-
 drivers/net/i40e/i40e_ethdev.c                | 270 +++---
 drivers/net/i40e/i40e_ethdev.h                |  24 +-
 drivers/net/i40e/i40e_ethdev_vf.c             | 110 +--
 drivers/net/i40e/i40e_flow.c                  |   2 +-
 drivers/net/i40e/i40e_hash.c                  | 156 +--
 drivers/net/i40e/i40e_pf.c                    |  14 +-
 drivers/net/i40e/i40e_rxtx.c                  |  10 +-
 drivers/net/i40e/i40e_rxtx.h                  |   4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c       |   2 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h       |   8 +-
 drivers/net/i40e/i40e_vf_representor.c        |  48 +-
 drivers/net/iavf/iavf.h                       |  24 +-
 drivers/net/iavf/iavf_ethdev.c                | 178 ++--
 drivers/net/iavf/iavf_hash.c                  | 300 +++---
 drivers/net/iavf/iavf_rxtx.c                  |   2 +-
 drivers/net/iavf/iavf_rxtx.h                  |  24 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c         |   4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c       |   6 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   2 +-
 drivers/net/ice/ice_dcf.c                     |   2 +-
 drivers/net/ice/ice_dcf_ethdev.c              |  90 +-
 drivers/net/ice/ice_dcf_vf_representor.c      |  58 +-
 drivers/net/ice/ice_ethdev.c                  | 182 ++--
 drivers/net/ice/ice_ethdev.h                  |  26 +-
 drivers/net/ice/ice_hash.c                    | 268 +++---
 drivers/net/ice/ice_rxtx.c                    |   8 +-
 drivers/net/ice/ice_rxtx_vec_avx2.c           |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c         |   4 +-
 drivers/net/ice/ice_rxtx_vec_common.h         |  26 +-
 drivers/net/ice/ice_rxtx_vec_sse.c            |   2 +-
 drivers/net/igc/igc_ethdev.c                  | 134 +--
 drivers/net/igc/igc_ethdev.h                  |  56 +-
 drivers/net/igc/igc_txrx.c                    |  50 +-
 drivers/net/ionic/ionic_ethdev.c              | 128 +--
 drivers/net/ionic/ionic_ethdev.h              |  12 +-
 drivers/net/ionic/ionic_lif.c                 |  36 +-
 drivers/net/ionic/ionic_rxtx.c                |  10 +-
 drivers/net/ipn3ke/ipn3ke_representor.c       |  70 +-
 drivers/net/ixgbe/ixgbe_ethdev.c              | 302 +++---
 drivers/net/ixgbe/ixgbe_ethdev.h              |  18 +-
 drivers/net/ixgbe/ixgbe_fdir.c                |  24 +-
 drivers/net/ixgbe/ixgbe_flow.c                |   2 +-
 drivers/net/ixgbe/ixgbe_ipsec.c               |  12 +-
 drivers/net/ixgbe/ixgbe_pf.c                  |  38 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                | 252 ++---
 drivers/net/ixgbe/ixgbe_rxtx.h                |   4 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |   2 +-
 drivers/net/ixgbe/ixgbe_tm.c                  |  16 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c      |  16 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.c             |  14 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.h             |   4 +-
 drivers/net/kni/rte_eth_kni.c                 |   8 +-
 drivers/net/liquidio/lio_ethdev.c             | 102 +-
 drivers/net/memif/memif_socket.c              |   2 +-
 drivers/net/memif/rte_eth_memif.c             |  14 +-
 drivers/net/mlx4/mlx4_ethdev.c                |  32 +-
 drivers/net/mlx4/mlx4_flow.c                  |  30 +-
 drivers/net/mlx4/mlx4_intr.c                  |   8 +-
 drivers/net/mlx4/mlx4_rxq.c                   |  20 +-
 drivers/net/mlx4/mlx4_txq.c                   |  24 +-
 drivers/net/mlx5/linux/mlx5_ethdev_os.c       |  54 +-
 drivers/net/mlx5/linux/mlx5_os.c              |   6 +-
 drivers/net/mlx5/mlx5.c                       |   4 +-
 drivers/net/mlx5/mlx5.h                       |   2 +-
 drivers/net/mlx5/mlx5_defs.h                  |   6 +-
 drivers/net/mlx5/mlx5_ethdev.c                |   6 +-
 drivers/net/mlx5/mlx5_flow.c                  |  54 +-
 drivers/net/mlx5/mlx5_flow.h                  |  12 +-
 drivers/net/mlx5/mlx5_flow_dv.c               |  44 +-
 drivers/net/mlx5/mlx5_flow_verbs.c            |   4 +-
 drivers/net/mlx5/mlx5_rss.c                   |   2 +-
 drivers/net/mlx5/mlx5_rxq.c                   |  42 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h              |   8 +-
 drivers/net/mlx5/mlx5_tx.c                    |  30 +-
 drivers/net/mlx5/mlx5_txq.c                   |  52 +-
 drivers/net/mlx5/mlx5_vlan.c                  |   4 +-
 drivers/net/mlx5/windows/mlx5_os.c            |   4 +-
 drivers/net/mvneta/mvneta_ethdev.c            |  34 +-
 drivers/net/mvneta/mvneta_ethdev.h            |  12 +-
 drivers/net/mvneta/mvneta_rxtx.c              |   2 +-
 drivers/net/mvpp2/mrvl_ethdev.c               | 116 +--
 drivers/net/netvsc/hn_ethdev.c                |  62 +-
 drivers/net/netvsc/hn_rndis.c                 |  50 +-
 drivers/net/nfb/nfb_ethdev.c                  |  20 +-
 drivers/net/nfb/nfb_rx.c                      |   2 +-
 drivers/net/nfp/nfp_common.c                  | 122 +--
 drivers/net/nfp/nfp_ethdev.c                  |   2 +-
 drivers/net/nfp/nfp_ethdev_vf.c               |   2 +-
 drivers/net/ngbe/ngbe_ethdev.c                |  50 +-
 drivers/net/null/rte_eth_null.c               |  16 +-
 drivers/net/octeontx/octeontx_ethdev.c        |  78 +-
 drivers/net/octeontx/octeontx_ethdev.h        |  32 +-
 drivers/net/octeontx/octeontx_ethdev_ops.c    |  26 +-
 drivers/net/octeontx2/otx2_ethdev.c           |  96 +-
 drivers/net/octeontx2/otx2_ethdev.h           |  66 +-
 drivers/net/octeontx2/otx2_ethdev_devargs.c   |  12 +-
 drivers/net/octeontx2/otx2_ethdev_ops.c       |  18 +-
 drivers/net/octeontx2/otx2_ethdev_sec.c       |   8 +-
 drivers/net/octeontx2/otx2_flow.c             |   2 +-
 drivers/net/octeontx2/otx2_flow_ctrl.c        |  36 +-
 drivers/net/octeontx2/otx2_flow_parse.c       |   4 +-
 drivers/net/octeontx2/otx2_link.c             |  40 +-
 drivers/net/octeontx2/otx2_mcast.c            |   2 +-
 drivers/net/octeontx2/otx2_ptp.c              |   4 +-
 drivers/net/octeontx2/otx2_rss.c              |  62 +-
 drivers/net/octeontx2/otx2_rx.c               |   4 +-
 drivers/net/octeontx2/otx2_tx.c               |   2 +-
 drivers/net/octeontx2/otx2_vlan.c             |  42 +-
 drivers/net/octeontx_ep/otx_ep_ethdev.c       |   8 +-
 drivers/net/octeontx_ep/otx_ep_rxtx.c         |   8 +-
 drivers/net/pcap/pcap_ethdev.c                |  12 +-
 drivers/net/pfe/pfe_ethdev.c                  |  18 +-
 drivers/net/qede/base/mcp_public.h            |   4 +-
 drivers/net/qede/qede_ethdev.c                | 138 +--
 drivers/net/qede/qede_filter.c                |  10 +-
 drivers/net/qede/qede_rxtx.c                  |   2 +-
 drivers/net/qede/qede_rxtx.h                  |  16 +-
 drivers/net/ring/rte_eth_ring.c               |  20 +-
 drivers/net/sfc/sfc.c                         |  30 +-
 drivers/net/sfc/sfc_ef100_rx.c                |  10 +-
 drivers/net/sfc/sfc_ef100_tx.c                |  20 +-
 drivers/net/sfc/sfc_ef10_essb_rx.c            |   4 +-
 drivers/net/sfc/sfc_ef10_rx.c                 |   8 +-
 drivers/net/sfc/sfc_ef10_tx.c                 |  32 +-
 drivers/net/sfc/sfc_ethdev.c                  |  42 +-
 drivers/net/sfc/sfc_flow.c                    |   2 +-
 drivers/net/sfc/sfc_port.c                    |  54 +-
 drivers/net/sfc/sfc_rx.c                      |  52 +-
 drivers/net/sfc/sfc_tx.c                      |  50 +-
 drivers/net/softnic/rte_eth_softnic.c         |  12 +-
 drivers/net/szedata2/rte_eth_szedata2.c       |  14 +-
 drivers/net/tap/rte_eth_tap.c                 | 104 +-
 drivers/net/tap/tap_rss.h                     |   2 +-
 drivers/net/thunderx/nicvf_ethdev.c           | 100 +-
 drivers/net/thunderx/nicvf_ethdev.h           |  42 +-
 drivers/net/txgbe/txgbe_ethdev.c              | 236 ++---
 drivers/net/txgbe/txgbe_ethdev.h              |  18 +-
 drivers/net/txgbe/txgbe_ethdev_vf.c           |  24 +-
 drivers/net/txgbe/txgbe_fdir.c                |  20 +-
 drivers/net/txgbe/txgbe_flow.c                |   2 +-
 drivers/net/txgbe/txgbe_ipsec.c               |  12 +-
 drivers/net/txgbe/txgbe_pf.c                  |  34 +-
 drivers/net/txgbe/txgbe_rxtx.c                | 312 +++---
 drivers/net/txgbe/txgbe_rxtx.h                |   4 +-
 drivers/net/txgbe/txgbe_tm.c                  |  16 +-
 drivers/net/vhost/rte_eth_vhost.c             |  16 +-
 drivers/net/virtio/virtio_ethdev.c            | 126 +--
 drivers/net/vmxnet3/vmxnet3_ethdev.c          |  74 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h          |  16 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c            |  16 +-
 examples/bbdev_app/main.c                     |   6 +-
 examples/bond/main.c                          |  14 +-
 examples/distributor/main.c                   |  12 +-
 examples/ethtool/ethtool-app/main.c           |   2 +-
 examples/ethtool/lib/rte_ethtool.c            |  18 +-
 .../pipeline_worker_generic.c                 |  16 +-
 .../eventdev_pipeline/pipeline_worker_tx.c    |  12 +-
 examples/flow_classify/flow_classify.c        |   4 +-
 examples/flow_filtering/main.c                |  16 +-
 examples/ioat/ioatfwd.c                       |   8 +-
 examples/ip_fragmentation/main.c              |  14 +-
 examples/ip_pipeline/link.c                   |  14 +-
 examples/ip_reassembly/main.c                 |  20 +-
 examples/ipsec-secgw/ipsec-secgw.c            |  34 +-
 examples/ipsec-secgw/sa.c                     |   8 +-
 examples/ipv4_multicast/main.c                |   8 +-
 examples/kni/main.c                           |  12 +-
 examples/l2fwd-crypto/main.c                  |  10 +-
 examples/l2fwd-event/l2fwd_common.c           |  10 +-
 examples/l2fwd-event/main.c                   |   2 +-
 examples/l2fwd-jobstats/main.c                |   8 +-
 examples/l2fwd-keepalive/main.c               |   8 +-
 examples/l2fwd/main.c                         |   8 +-
 examples/l3fwd-acl/main.c                     |  20 +-
 examples/l3fwd-graph/main.c                   |  16 +-
 examples/l3fwd-power/main.c                   |  18 +-
 examples/l3fwd/l3fwd_event.c                  |   4 +-
 examples/l3fwd/main.c                         |  20 +-
 examples/link_status_interrupt/main.c         |  10 +-
 .../client_server_mp/mp_server/init.c         |   4 +-
 examples/multi_process/symmetric_mp/main.c    |  14 +-
 examples/ntb/ntb_fwd.c                        |   6 +-
 examples/packet_ordering/main.c               |   4 +-
 .../performance-thread/l3fwd-thread/main.c    |  18 +-
 examples/pipeline/obj.c                       |  14 +-
 examples/ptpclient/ptpclient.c                |  10 +-
 examples/qos_meter/main.c                     |  16 +-
 examples/qos_sched/init.c                     |   6 +-
 examples/rxtx_callbacks/main.c                |   8 +-
 examples/server_node_efd/server/init.c        |   8 +-
 examples/skeleton/basicfwd.c                  |   4 +-
 examples/vhost/main.c                         |  28 +-
 examples/vm_power_manager/main.c              |   6 +-
 examples/vmdq/main.c                          |  20 +-
 examples/vmdq_dcb/main.c                      |  40 +-
 lib/ethdev/rte_ethdev.c                       | 187 ++--
 lib/ethdev/rte_ethdev.h                       | 907 +++++++++++-------
 lib/ethdev/rte_ethdev_core.h                  |   2 +-
 lib/ethdev/rte_flow.h                         |   2 +-
 lib/gso/rte_gso.c                             |  20 +-
 lib/gso/rte_gso.h                             |   4 +-
 lib/mbuf/rte_mbuf_core.h                      |   8 +-
 lib/mbuf/rte_mbuf_dyn.h                       |   2 +-
 337 files changed, 6520 insertions(+), 6295 deletions(-)

diff --git a/app/proc-info/main.c b/app/proc-info/main.c
index a8e928fa9ff3..963b6aa5c589 100644
--- a/app/proc-info/main.c
+++ b/app/proc-info/main.c
@@ -757,11 +757,11 @@ show_port(void)
 		}
 
 		ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
-		if (ret == 0 && fc_conf.mode != RTE_FC_NONE)  {
+		if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE)  {
 			printf("\t  -- flow control mode %s%s high %u low %u pause %u%s%s\n",
-			       fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
-			       fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
-			       fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+			       fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+			       fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+			       fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
 			       fc_conf.autoneg ? " auto" : "",
 			       fc_conf.high_water,
 			       fc_conf.low_water,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index cc100650c21e..41e92143121b 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -668,14 +668,14 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct test_perf *t = evt_test_priv(test);
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 			.split_hdr_size = 0,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 6ee530d4cdc9..96c8a5828364 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
@@ -199,7 +199,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 	port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
 	if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	t->internal_port = 1;
 	RTE_ETH_FOREACH_DEV(i) {
@@ -224,7 +224,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 			local_port_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_RSS_HASH;
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 		ret = rte_eth_dev_info_get(i, &dev_info);
 		if (ret != 0) {
@@ -234,9 +234,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 
 		/* Enable mbuf fast free if PMD has the capability. */
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h
index a14d4e05e185..4249b6175b82 100644
--- a/app/test-flow-perf/config.h
+++ b/app/test-flow-perf/config.h
@@ -5,7 +5,7 @@
 #define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
 
 /* Configuration */
 #define RXQ_NUM 4
diff --git a/app/test-pipeline/init.c b/app/test-pipeline/init.c
index fe37d63730c6..c73801904103 100644
--- a/app/test-pipeline/init.c
+++ b/app/test-pipeline/init.c
@@ -70,16 +70,16 @@ struct app_params app = {
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -178,7 +178,7 @@ app_ports_check_link(void)
 		RTE_LOG(INFO, USER1, "Port %u %s\n",
 			port,
 			link_status_text);
-		if (link.link_status == ETH_LINK_DOWN)
+		if (link.link_status == RTE_ETH_LINK_DOWN)
 			all_ports_up = 0;
 	}
 
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 82253bc75110..d6db93557b95 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1490,51 +1490,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
 	int duplex;
 
 	if (!strcmp(duplexstr, "half")) {
-		duplex = ETH_LINK_HALF_DUPLEX;
+		duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	} else if (!strcmp(duplexstr, "full")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else if (!strcmp(duplexstr, "auto")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		fprintf(stderr, "Unknown duplex parameter\n");
 		return -1;
 	}
 
 	if (!strcmp(speedstr, "10")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
 	} else if (!strcmp(speedstr, "100")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
 	} else {
-		if (duplex != ETH_LINK_FULL_DUPLEX) {
+		if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
 			fprintf(stderr, "Invalid speed/duplex parameters\n");
 			return -1;
 		}
 		if (!strcmp(speedstr, "1000")) {
-			*speed = ETH_LINK_SPEED_1G;
+			*speed = RTE_ETH_LINK_SPEED_1G;
 		} else if (!strcmp(speedstr, "10000")) {
-			*speed = ETH_LINK_SPEED_10G;
+			*speed = RTE_ETH_LINK_SPEED_10G;
 		} else if (!strcmp(speedstr, "25000")) {
-			*speed = ETH_LINK_SPEED_25G;
+			*speed = RTE_ETH_LINK_SPEED_25G;
 		} else if (!strcmp(speedstr, "40000")) {
-			*speed = ETH_LINK_SPEED_40G;
+			*speed = RTE_ETH_LINK_SPEED_40G;
 		} else if (!strcmp(speedstr, "50000")) {
-			*speed = ETH_LINK_SPEED_50G;
+			*speed = RTE_ETH_LINK_SPEED_50G;
 		} else if (!strcmp(speedstr, "100000")) {
-			*speed = ETH_LINK_SPEED_100G;
+			*speed = RTE_ETH_LINK_SPEED_100G;
 		} else if (!strcmp(speedstr, "200000")) {
-			*speed = ETH_LINK_SPEED_200G;
+			*speed = RTE_ETH_LINK_SPEED_200G;
 		} else if (!strcmp(speedstr, "auto")) {
-			*speed = ETH_LINK_SPEED_AUTONEG;
+			*speed = RTE_ETH_LINK_SPEED_AUTONEG;
 		} else {
 			fprintf(stderr, "Unknown speed parameter\n");
 			return -1;
 		}
 	}
 
-	if (*speed != ETH_LINK_SPEED_AUTONEG)
-		*speed |= ETH_LINK_SPEED_FIXED;
+	if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+		*speed |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return 0;
 }
@@ -2185,33 +2185,33 @@ cmd_config_rss_parsed(void *parsed_result,
 	int ret;
 
 	if (!strcmp(res->value, "all"))
-		rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
-			ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
-			ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
-			ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
-			ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+			RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+			RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+			RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+			RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "eth"))
-		rss_conf.rss_hf = ETH_RSS_ETH;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH;
 	else if (!strcmp(res->value, "vlan"))
-		rss_conf.rss_hf = ETH_RSS_VLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
 	else if (!strcmp(res->value, "ip"))
-		rss_conf.rss_hf = ETH_RSS_IP;
+		rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	else if (!strcmp(res->value, "udp"))
-		rss_conf.rss_hf = ETH_RSS_UDP;
+		rss_conf.rss_hf = RTE_ETH_RSS_UDP;
 	else if (!strcmp(res->value, "tcp"))
-		rss_conf.rss_hf = ETH_RSS_TCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_TCP;
 	else if (!strcmp(res->value, "sctp"))
-		rss_conf.rss_hf = ETH_RSS_SCTP;
+		rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
 	else if (!strcmp(res->value, "ether"))
-		rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
 	else if (!strcmp(res->value, "port"))
-		rss_conf.rss_hf = ETH_RSS_PORT;
+		rss_conf.rss_hf = RTE_ETH_RSS_PORT;
 	else if (!strcmp(res->value, "vxlan"))
-		rss_conf.rss_hf = ETH_RSS_VXLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
 	else if (!strcmp(res->value, "geneve"))
-		rss_conf.rss_hf = ETH_RSS_GENEVE;
+		rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
 	else if (!strcmp(res->value, "nvgre"))
-		rss_conf.rss_hf = ETH_RSS_NVGRE;
+		rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
 	else if (!strcmp(res->value, "l3-pre32"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
 	else if (!strcmp(res->value, "l3-pre40"))
@@ -2225,44 +2225,44 @@ cmd_config_rss_parsed(void *parsed_result,
 	else if (!strcmp(res->value, "l3-pre96"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
 	else if (!strcmp(res->value, "l3-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
 	else if (!strcmp(res->value, "l3-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
 	else if (!strcmp(res->value, "l4-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
 	else if (!strcmp(res->value, "l2-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
 	else if (!strcmp(res->value, "l2-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
 	else if (!strcmp(res->value, "l2tpv3"))
-		rss_conf.rss_hf = ETH_RSS_L2TPV3;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
 	else if (!strcmp(res->value, "esp"))
-		rss_conf.rss_hf = ETH_RSS_ESP;
+		rss_conf.rss_hf = RTE_ETH_RSS_ESP;
 	else if (!strcmp(res->value, "ah"))
-		rss_conf.rss_hf = ETH_RSS_AH;
+		rss_conf.rss_hf = RTE_ETH_RSS_AH;
 	else if (!strcmp(res->value, "pfcp"))
-		rss_conf.rss_hf = ETH_RSS_PFCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
 	else if (!strcmp(res->value, "pppoe"))
-		rss_conf.rss_hf = ETH_RSS_PPPOE;
+		rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
 	else if (!strcmp(res->value, "gtpu"))
-		rss_conf.rss_hf = ETH_RSS_GTPU;
+		rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
 	else if (!strcmp(res->value, "ecpri"))
-		rss_conf.rss_hf = ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "mpls"))
-		rss_conf.rss_hf = ETH_RSS_MPLS;
+		rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "level-default")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
 	} else if (!strcmp(res->value, "level-outer")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
 	} else if (!strcmp(res->value, "level-inner")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
 	} else if (!strcmp(res->value, "default"))
 		use_default = 1;
 	else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@@ -3029,10 +3029,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
 	} else
 		printf("The reta size of port %d is %u\n",
 			res->port_id, dev_info.reta_size);
-	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+	if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		fprintf(stderr,
 			"Currently do not support more than %u entries of redirection table\n",
-			ETH_RSS_RETA_SIZE_512);
+			RTE_ETH_RSS_RETA_SIZE_512);
 		return;
 	}
 
@@ -3149,7 +3149,7 @@ cmd_showport_reta_parsed(void *parsed_result,
 	if (ret != 0)
 		return;
 
-	max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+	max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
 	if (res->size == 0 || res->size > max_reta_size) {
 		fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
 			res->size, max_reta_size);
@@ -3289,7 +3289,7 @@ cmd_config_dcb_parsed(void *parsed_result,
 		return;
 	}
 
-	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+	if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
 		fprintf(stderr,
 			"The invalid number of traffic class, only 4 or 8 allowed.\n");
 		return;
@@ -4293,9 +4293,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
 	enum rte_vlan_type vlan_type;
 
 	if (!strcmp(res->vlan_type, "inner"))
-		vlan_type = ETH_VLAN_TYPE_INNER;
+		vlan_type = RTE_ETH_VLAN_TYPE_INNER;
 	else if (!strcmp(res->vlan_type, "outer"))
-		vlan_type = ETH_VLAN_TYPE_OUTER;
+		vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
 	else {
 		fprintf(stderr, "Unknown vlan type\n");
 		return;
@@ -4632,55 +4632,55 @@ csum_show(int port_id)
 	printf("Parse tunnel is %s\n",
 		(ports[port_id].parse_tunnel) ? "on" : "off");
 	printf("IP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
 	printf("UDP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
 	printf("TCP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
 	printf("SCTP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
 	printf("Outer-Ip checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
 	printf("Outer-Udp checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
 
 	/* display warnings if configuration is not supported by the NIC */
 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
 	if (ret != 0)
 		return;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware UDP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware TCP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 			== 0) {
 		fprintf(stderr,
 			"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@@ -4730,8 +4730,8 @@ cmd_csum_parsed(void *parsed_result,
 
 		if (!strcmp(res->proto, "ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_IPV4_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"IP checksum offload is not supported by port %u\n",
@@ -4739,8 +4739,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_UDP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"UDP checksum offload is not supported by port %u\n",
@@ -4748,8 +4748,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "tcp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_TCP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"TCP checksum offload is not supported by port %u\n",
@@ -4757,8 +4757,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "sctp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_SCTP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"SCTP checksum offload is not supported by port %u\n",
@@ -4766,9 +4766,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer IP checksum offload is not supported by port %u\n",
@@ -4776,9 +4776,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer UDP checksum offload is not supported by port %u\n",
@@ -4933,7 +4933,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr, "Error: TSO is not supported by port %d\n",
 			res->port_id);
 		return;
@@ -4941,11 +4941,11 @@ cmd_tso_set_parsed(void *parsed_result,
 
 	if (ports[res->port_id].tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_TCP_TSO;
+						~RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO for non-tunneled packets is disabled\n");
 	} else {
 		ports[res->port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_TCP_TSO;
+						RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO segment size for non-tunneled packets is %d\n",
 			ports[res->port_id].tso_segsz);
 	}
@@ -4957,7 +4957,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr,
 			"Warning: TSO enabled but not supported by port %d\n",
 			res->port_id);
@@ -5028,27 +5028,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
 	if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
 		return dev_info;
 
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
 		fprintf(stderr,
 			"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
@@ -5076,20 +5076,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 	dev_info = check_tunnel_tso_nic_support(res->port_id);
 	if (ports[res->port_id].tunnel_tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-			~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IP_TNL_TSO |
-			  DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 		printf("TSO for tunneled packets is disabled\n");
 	} else {
-		uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-					 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IP_TNL_TSO |
-					 DEV_TX_OFFLOAD_UDP_TNL_TSO);
+		uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 
 		ports[res->port_id].dev_conf.txmode.offloads |=
 			(tso_offloads & dev_info.tx_offload_capa);
@@ -5112,7 +5112,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 			fprintf(stderr,
 				"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
 		if (!(ports[res->port_id].dev_conf.txmode.offloads &
-		      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+		      RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 			fprintf(stderr,
 				"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
 	}
@@ -7058,9 +7058,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
 		return;
 	}
 
-	if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		rx_fc_en = true;
-	if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		tx_fc_en = true;
 
 	printf("\n%s Flow control infos for port %-2d %s\n",
@@ -7338,12 +7338,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, i.e. transmit pause frames from the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, i.e. respond to received pause frames on the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
-			{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	/* Partial command line, retrieve current configuration */
@@ -7356,11 +7356,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 			return;
 		}
 
-		if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			rx_fc_en = 1;
-		if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			tx_fc_en = 1;
 	}
 
@@ -7428,12 +7428,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, i.e. transmit pause frames from the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, i.e. respond to received pause frames on the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
-		{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+		{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@@ -8950,13 +8950,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
 	int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
 	if (!strcmp(res->what,"rxmode")) {
 		if (!strcmp(res->mode, "AUPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
 		else if (!strcmp(res->mode, "ROPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
 		else if (!strcmp(res->mode, "BAM"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
 		else if (!strncmp(res->mode, "MPE",3))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 	}
 
 	RTE_SET_USED(is_on);
@@ -9356,7 +9356,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
 	int ret;
 
 	tunnel_udp.udp_port = res->udp_port;
-	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+	tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 
 	if (!strcmp(res->what, "add"))
 		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9422,13 +9422,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
 	tunnel_udp.udp_port = res->udp_port;
 
 	if (!strcmp(res->tunnel_type, "vxlan")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 	} else if (!strcmp(res->tunnel_type, "geneve")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
 	} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
 	} else if (!strcmp(res->tunnel_type, "ecpri")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
 	} else {
 		fprintf(stderr, "Invalid tunnel type\n");
 		return;
@@ -9543,20 +9543,20 @@ cmd_set_mirror_mask_parsed(void *parsed_result,
 
 	memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf));
 
-	unsigned int vlan_list[ETH_MIRROR_MAX_VLANS];
+	unsigned int vlan_list[RTE_ETH_MIRROR_MAX_VLANS];
 
 	mr_conf.dst_pool = res->dstpool_id;
 
 	if (!strcmp(res->what, "pool-mirror-up")) {
 		mr_conf.pool_mask = strtoull(res->value, NULL, 16);
-		mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
+		mr_conf.rule_type = RTE_ETH_MIRROR_VIRTUAL_POOL_UP;
 	} else if (!strcmp(res->what, "pool-mirror-down")) {
 		mr_conf.pool_mask = strtoull(res->value, NULL, 16);
-		mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_DOWN;
+		mr_conf.rule_type = RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN;
 	} else if (!strcmp(res->what, "vlan-mirror")) {
-		mr_conf.rule_type = ETH_MIRROR_VLAN;
+		mr_conf.rule_type = RTE_ETH_MIRROR_VLAN;
 		nb_item = parse_item_list(res->value, "vlan",
-				ETH_MIRROR_MAX_VLANS, vlan_list, 1);
+				RTE_ETH_MIRROR_MAX_VLANS, vlan_list, 1);
 		if (nb_item <= 0)
 			return;
 
@@ -9656,9 +9656,9 @@ cmd_set_mirror_link_parsed(void *parsed_result,
 
 	memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf));
 	if (!strcmp(res->what, "uplink-mirror"))
-		mr_conf.rule_type = ETH_MIRROR_UPLINK_PORT;
+		mr_conf.rule_type = RTE_ETH_MIRROR_UPLINK_PORT;
 	else
-		mr_conf.rule_type = ETH_MIRROR_DOWNLINK_PORT;
+		mr_conf.rule_type = RTE_ETH_MIRROR_DOWNLINK_PORT;
 
 	mr_conf.dst_pool = res->dstpool_id;
 
@@ -11823,7 +11823,7 @@ cmd_set_macsec_offload_on_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
 #endif
@@ -11834,7 +11834,7 @@ cmd_set_macsec_offload_on_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MACSEC_INSERT;
+						RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
@@ -11920,7 +11920,7 @@ cmd_set_macsec_offload_off_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_disable(port_id);
 #endif
@@ -11928,7 +11928,7 @@ cmd_set_macsec_offload_off_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_MACSEC_INSERT;
+						~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
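
As an aside for applications migrating to the renamed macros, the
capability-probe pattern used throughout cmdline.c above generalizes.
A minimal sketch, illustrative only (port_id and conf are assumed to
be owned by the caller):

    #include <errno.h>
    #include <rte_ethdev.h>

    /* Request MACsec insertion only when the port advertises it,
     * using the RTE_ETH_ namespaced offload macro. */
    static int
    request_macsec_insert(uint16_t port_id, struct rte_eth_conf *conf)
    {
        struct rte_eth_dev_info dev_info;
        int ret = rte_eth_dev_info_get(port_id, &dev_info);

        if (ret != 0)
            return ret;
        if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT))
            return -ENOTSUP;

        conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
        return 0;
    }
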
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 31d8ba1b913c..1b0b9ab6d445 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -86,60 +86,60 @@ static const struct {
 };
 
 const struct rss_type_info rss_type_table[] = {
-	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
-		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
-		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
-		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
 	{ "none", 0 },
-	{ "eth", ETH_RSS_ETH },
-	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
-	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
-	{ "vlan", ETH_RSS_VLAN },
-	{ "s-vlan", ETH_RSS_S_VLAN },
-	{ "c-vlan", ETH_RSS_C_VLAN },
-	{ "ipv4", ETH_RSS_IPV4 },
-	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
-	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
-	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
-	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
-	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
-	{ "ipv6", ETH_RSS_IPV6 },
-	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
-	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
-	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
-	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
-	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
-	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
-	{ "ipv6-ex", ETH_RSS_IPV6_EX },
-	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
-	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
-	{ "port", ETH_RSS_PORT },
-	{ "vxlan", ETH_RSS_VXLAN },
-	{ "geneve", ETH_RSS_GENEVE },
-	{ "nvgre", ETH_RSS_NVGRE },
-	{ "ip", ETH_RSS_IP },
-	{ "udp", ETH_RSS_UDP },
-	{ "tcp", ETH_RSS_TCP },
-	{ "sctp", ETH_RSS_SCTP },
-	{ "tunnel", ETH_RSS_TUNNEL },
+	{ "eth", RTE_ETH_RSS_ETH },
+	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+	{ "vlan", RTE_ETH_RSS_VLAN },
+	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
+	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
+	{ "ipv4", RTE_ETH_RSS_IPV4 },
+	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", RTE_ETH_RSS_IPV6 },
+	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+	{ "port", RTE_ETH_RSS_PORT },
+	{ "vxlan", RTE_ETH_RSS_VXLAN },
+	{ "geneve", RTE_ETH_RSS_GENEVE },
+	{ "nvgre", RTE_ETH_RSS_NVGRE },
+	{ "ip", RTE_ETH_RSS_IP },
+	{ "udp", RTE_ETH_RSS_UDP },
+	{ "tcp", RTE_ETH_RSS_TCP },
+	{ "sctp", RTE_ETH_RSS_SCTP },
+	{ "tunnel", RTE_ETH_RSS_TUNNEL },
 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
-	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
-	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
-	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
-	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
-	{ "esp", ETH_RSS_ESP },
-	{ "ah", ETH_RSS_AH },
-	{ "l2tpv3", ETH_RSS_L2TPV3 },
-	{ "pfcp", ETH_RSS_PFCP },
-	{ "pppoe", ETH_RSS_PPPOE },
-	{ "gtpu", ETH_RSS_GTPU },
-	{ "ecpri", ETH_RSS_ECPRI },
-	{ "mpls", ETH_RSS_MPLS },
+	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+	{ "esp", RTE_ETH_RSS_ESP },
+	{ "ah", RTE_ETH_RSS_AH },
+	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+	{ "pfcp", RTE_ETH_RSS_PFCP },
+	{ "pppoe", RTE_ETH_RSS_PPPOE },
+	{ "gtpu", RTE_ETH_RSS_GTPU },
+	{ "ecpri", RTE_ETH_RSS_ECPRI },
+	{ "mpls", RTE_ETH_RSS_MPLS },
 	{ NULL, 0 },
 };
 
@@ -474,39 +474,39 @@ static void
 device_infos_display_speeds(uint32_t speed_capa)
 {
 	printf("\n\tDevice speed capability:");
-	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
 		printf(" Autonegotiate (all speeds)");
-	if (speed_capa & ETH_LINK_SPEED_FIXED)
+	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
 		printf(" Disable autonegotiate (fixed speed)  ");
-	if (speed_capa & ETH_LINK_SPEED_10M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
 		printf(" 10 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_10M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
 		printf(" 10 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
 		printf(" 100 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
 		printf(" 100 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_1G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
 		printf(" 1 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_2_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
 		printf(" 2.5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
 		printf(" 5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_10G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
 		printf(" 10 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_20G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
 		printf(" 20 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_25G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
 		printf(" 25 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_40G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
 		printf(" 40 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_50G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
 		printf(" 50 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_56G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
 		printf(" 56 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_100G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
 		printf(" 100 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_200G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
 		printf(" 200 Gbps  ");
 }
 
@@ -636,9 +636,9 @@ port_infos_display(portid_t port_id)
 
 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
-	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 	       ("full-duplex") : ("half-duplex"));
-	printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 	       ("On") : ("Off"));
 
 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
@@ -656,22 +656,22 @@ port_infos_display(portid_t port_id)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 	if (vlan_offload >= 0){
 		printf("VLAN offload: \n");
-		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
 			printf("  strip on, ");
 		else
 			printf("  strip off, ");
 
-		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
 			printf("filter on, ");
 		else
 			printf("filter off, ");
 
-		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
 			printf("extend on, ");
 		else
 			printf("extend off, ");
 
-		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
 			printf("qinq strip on\n");
 		else
 			printf("qinq strip off\n");
@@ -1166,7 +1166,7 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
 	diag = rte_eth_dev_set_mtu(port_id, mtu);
 	if (diag)
 		fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
-	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	else if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		/*
 		 * Ether overhead in driver is equal to the difference of
 		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
@@ -1175,12 +1175,12 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
 		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
 		if (mtu > RTE_ETHER_MTU) {
 			rte_port->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			rte_port->dev_conf.rxmode.max_rx_pkt_len =
 						mtu + eth_overhead;
 		} else
 			rte_port->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	}
 }
 
@@ -3118,7 +3118,7 @@ dcb_fwd_config_setup(void)
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
 		fwd_lcores[lc_id]->stream_nb = 0;
 		fwd_lcores[lc_id]->stream_idx = sm_id;
-		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
 			/* if the nb_queue is zero, means this tc is
 			 * not enabled on the POOL
 			 */
@@ -4181,11 +4181,11 @@ vlan_extend_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	} else {
-		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4211,11 +4211,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4256,11 +4256,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	} else {
-		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4286,11 +4286,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	} else {
-		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4360,7 +4360,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 		return;
 
 	if (ports[port_id].dev_conf.txmode.offloads &
-	    DEV_TX_OFFLOAD_QINQ_INSERT) {
+	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
 		return;
 	}
@@ -4369,7 +4369,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: vlan insert is not supported by port %d\n",
 			port_id);
@@ -4377,7 +4377,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	ports[port_id].tx_vlan_id = vlan_id;
 }
 
@@ -4396,7 +4396,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: qinq insert not supported by port %d\n",
 			port_id);
@@ -4404,8 +4404,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
-						    DEV_TX_OFFLOAD_QINQ_INSERT);
+	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = vlan_id;
 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
 }
@@ -4414,8 +4414,8 @@ void
 tx_vlan_reset(portid_t port_id)
 {
 	ports[port_id].dev_conf.txmode.offloads &=
-				~(DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_QINQ_INSERT);
+				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = 0;
 	ports[port_id].tx_vlan_id_outer = 0;
 }
@@ -4821,7 +4821,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
 	ret = eth_link_get_nowait_print_err(port_id, &link);
 	if (ret < 0)
 		return 1;
-	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
 	    rate > link.link_speed) {
 		fprintf(stderr,
 			"Invalid rate value:%u bigger than link speed: %u\n",
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 38cc256533b6..454a2d41c366 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
-			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 				ol_flags |= PKT_TX_IP_CKSUM;
 			} else {
 				ipv4_hdr->hdr_checksum = 0;
@@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
-			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 				ol_flags |= PKT_TX_UDP_CKSUM;
 			} else {
 				udp_hdr->dgram_cksum = 0;
@@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		if (tso_segsz)
 			ol_flags |= PKT_TX_TCP_SEG;
-		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 			ol_flags |= PKT_TX_TCP_CKSUM;
 		} else {
 			tcp_hdr->cksum = 0;
@@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 			((char *)l3_hdr + info->l3_len);
 		/* sctp payload must be a multiple of 4 to be
 		 * offloaded */
-		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
 			((ipv4_hdr->total_length & 0x3) == 0)) {
 			ol_flags |= PKT_TX_SCTP_CKSUM;
 		} else {
@@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ipv4_hdr->hdr_checksum = 0;
 		ol_flags |= PKT_TX_OUTER_IPV4;
 
-		if (tx_offloads	& DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
 		else
 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ol_flags |= PKT_TX_TCP_SEG;
 
 	/* Skip SW outer UDP checksum generation if HW supports it */
-	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
 		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
 			udp_hdr->dgram_cksum
 				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				m->outer_l2_len = info.outer_l2_len;
 				m->outer_l3_len = info.outer_l3_len;
 				m->l2_len = info.l2_len;
@@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					rte_be_to_cpu_16(info.outer_ethertype),
 					info.outer_l3_len);
 			/* dump tx packet info */
-			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					    DEV_TX_OFFLOAD_UDP_CKSUM |
-					    DEV_TX_OFFLOAD_TCP_CKSUM |
-					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
 				info.tso_segsz != 0)
 				printf("tx: m->l2_len=%d m->l3_len=%d "
 					"m->l4_len=%d\n",
 					m->l2_len, m->l3_len, m->l4_len);
 			if (info.is_tunnel == 1) {
 				if ((tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 				    (tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
 				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
 					printf("tx: m->outer_l2_len=%d "
 						"m->outer_l3_len=%d\n",
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 9348618d0f8d..7d658d002cb6 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -100,11 +100,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
 
 	tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags |= PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads	& DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 0568ea794d48..1d878ba0a694 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 	fs->rx_packets += nb_rx;
 	txp = &ports[fs->tx_port];
 	tx_offloads = txp->dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
diff --git a/app/test-pmd/macswap_common.h b/app/test-pmd/macswap_common.h
index 7e9a3590a436..7ade9a686b7c 100644
--- a/app/test-pmd/macswap_common.h
+++ b/app/test-pmd/macswap_common.h
@@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
 {
 	uint64_t ol_flags = 0;
 
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
 			PKT_TX_VLAN : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
 			PKT_TX_QINQ : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
 			PKT_TX_MACSEC : 0;
 
 	return ol_flags;
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 7c13210f04aa..1d0187723532 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -475,29 +475,29 @@ parse_event_printing_config(const char *optarg, int enable)
 static int
 parse_link_speed(int n)
 {
-	uint32_t speed = ETH_LINK_SPEED_FIXED;
+	uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
 
 	switch (n) {
 	case 1000:
-		speed |= ETH_LINK_SPEED_1G;
+		speed |= RTE_ETH_LINK_SPEED_1G;
 		break;
 	case 10000:
-		speed |= ETH_LINK_SPEED_10G;
+		speed |= RTE_ETH_LINK_SPEED_10G;
 		break;
 	case 25000:
-		speed |= ETH_LINK_SPEED_25G;
+		speed |= RTE_ETH_LINK_SPEED_25G;
 		break;
 	case 40000:
-		speed |= ETH_LINK_SPEED_40G;
+		speed |= RTE_ETH_LINK_SPEED_40G;
 		break;
 	case 50000:
-		speed |= ETH_LINK_SPEED_50G;
+		speed |= RTE_ETH_LINK_SPEED_50G;
 		break;
 	case 100000:
-		speed |= ETH_LINK_SPEED_100G;
+		speed |= RTE_ETH_LINK_SPEED_100G;
 		break;
 	case 200000:
-		speed |= ETH_LINK_SPEED_200G;
+		speed |= RTE_ETH_LINK_SPEED_200G;
 		break;
 	case 100:
 	case 10:
@@ -912,13 +912,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
 				if (!strcmp(optarg, "64K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_64K;
+						RTE_ETH_FDIR_PBALLOC_64K;
 				else if (!strcmp(optarg, "128K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_128K;
+						RTE_ETH_FDIR_PBALLOC_128K;
 				else if (!strcmp(optarg, "256K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_256K;
+						RTE_ETH_FDIR_PBALLOC_256K;
 				else
 					rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
 						 " must be: 64K or 128K or 256K\n",
@@ -960,34 +960,34 @@ launch_args_parse(int argc, char** argv)
 			}
 #endif
 			if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 			if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
-				rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 			if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
-				rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 			if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
-				rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-rx-timestamp"))
-				rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 			if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-filter"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-extend"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-qinq-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
 				rx_drop_en = 1;
@@ -1009,13 +1009,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
 				set_pkt_forwarding_mode(optarg);
 			if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
-				rss_hf = ETH_RSS_IP;
+				rss_hf = RTE_ETH_RSS_IP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
-				rss_hf = ETH_RSS_UDP;
+				rss_hf = RTE_ETH_RSS_UDP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
-				rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
-				rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rxq")) {
 				n = atoi(optarg);
 				if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@@ -1386,12 +1386,12 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
 				char *end = NULL;
 				n = strtoul(optarg, &end, 16);
-				if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+				if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
 					rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
 				else
 					rte_exit(EXIT_FAILURE,
 						 "rx-mq-mode must be >= 0 and <= %d\n",
-						 ETH_MQ_RX_VMDQ_DCB_RSS);
+						 RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
 			}
 			if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
 				record_core_cycles = 1;
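
parse_link_speed() composes the same mask an application would pass in
rte_eth_conf.link_speeds; for example, a sketch forcing a fixed 10 Gbps
link with the renamed flags:

    #include <rte_ethdev.h>

    /* Disable autonegotiation and request exactly 10 Gbps. */
    static const struct rte_eth_conf fixed_10g_conf = {
        .link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G,
    };
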
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 6cbe9ba3c893..30bf897d6da8 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -337,7 +337,7 @@ uint64_t noisy_lkup_num_reads_writes;
 /*
  * Receive Side Scaling (RSS) configuration.
  */
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
 
 /*
  * Port topology configuration
@@ -454,12 +454,12 @@ struct rte_eth_rxmode rx_mode = {
 };
 
 struct rte_eth_txmode tx_mode = {
-	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 };
 
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
 	.mode = RTE_FDIR_MODE_NONE,
-	.pballoc = RTE_FDIR_PBALLOC_64K,
+	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 	.status = RTE_FDIR_REPORT_STATUS,
 	.mask = {
 		.vlan_tci_mask = 0xFFEF,
@@ -513,7 +513,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 /*
  * hexadecimal bitmask of RX mq mode can be enabled.
  */
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
 
 /*
  * Used to set forced link speed
@@ -1437,9 +1437,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
 			"Updating jumbo frame offload failed for port %u\n",
 			pid);
 
-	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		port->dev_conf.txmode.offloads &=
-			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Apply Rx offloads configuration */
 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
@@ -1566,8 +1566,8 @@ init_config(void)
 
 	init_port_config();
 
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
 	/*
 	 * Records which Mbuf pool to use by each logical core, if needed.
 	 */
@@ -3154,7 +3154,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3414,17 +3414,17 @@ update_jumbo_frame_offload(portid_t portid)
 		port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
 
 	if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
-		rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		on = false;
 	} else {
-		if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+		if ((port->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0) {
 			fprintf(stderr,
 				"Frame size (%u) is not supported by port %u\n",
 				port->dev_conf.rxmode.max_rx_pkt_len,
 				portid);
 			return -1;
 		}
-		rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		on = true;
 	}
 
@@ -3436,16 +3436,16 @@ update_jumbo_frame_offload(portid_t portid)
 		/* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
 		for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
 			if (on)
-				port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+				port->rx_conf[qid].offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			else
-				port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				port->rx_conf[qid].offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		}
 	}
 
 	/* If JUMBO_FRAME is set MTU conversion done by ethdev layer,
 	 * if unset do it here
 	 */
-	if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0) {
 		ret = rte_eth_dev_set_mtu(portid,
 				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
 		if (ret)
@@ -3486,9 +3486,9 @@ init_port_config(void)
 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
 				port->dev_conf.rxmode.mq_mode =
 					(enum rte_eth_rx_mq_mode)
-						(rx_mq_mode & ETH_MQ_RX_RSS);
+						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
 			else
-				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 		}
 
 		rxtx_port_config(port);
@@ -3575,9 +3575,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		vmdq_rx_conf->enable_default_pool = 0;
 		vmdq_rx_conf->default_pool = 0;
 		vmdq_rx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 		vmdq_tx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 
 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3585,7 +3585,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 			vmdq_rx_conf->pool_map[i].pools =
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
@@ -3593,8 +3593,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3610,23 +3610,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
 	}
 
 	if (pfc_en)
 		eth_conf->dcb_capability_en =
-				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
 	else
-		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
 
 	return 0;
 }
@@ -3653,7 +3653,7 @@ init_port_dcb_config(portid_t pid,
 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	/* re-configure the device . */
 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@@ -3703,7 +3703,7 @@ init_port_dcb_config(portid_t pid,
 
 	rxtx_port_config(rte_port);
 	/* VLAN filter */
-	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 16a3598e48c5..e4ad8a6a7cff 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -446,7 +446,7 @@ extern lcoreid_t bitrate_lcore_id;
 extern uint8_t bitrate_enabled;
 #endif
 
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
 
 /*
  * Configuration of packet segments used to scatter received packets
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index aed820f5d340..5409d7a0deb0 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -352,11 +352,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	tx_offloads = txp->dev_conf.txmode.offloads;
 	vlan_tci = txp->tx_vlan_id;
 	vlan_tci_outer = txp->tx_vlan_id_outer;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	/*
diff --git a/app/test/test_ethdev_link.c b/app/test/test_ethdev_link.c
index ee11987bae28..7c0ebec5bd4b 100644
--- a/app/test/test_ethdev_link.c
+++ b/app/test/test_ethdev_link.c
@@ -14,10 +14,10 @@ test_link_status_up_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -27,9 +27,9 @@ test_link_status_up_default(void)
 	TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
 		text, strlen(text), "Invalid default link status string");
 
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_FIXED;
-	link_status.link_speed = ETH_SPEED_NUM_10M,
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_10M,
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #2: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -37,7 +37,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -45,7 +45,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_NONE;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -54,9 +54,9 @@ test_link_status_up_default(void)
 		"string with HDX");
 
 	/* test max str len */
-	link_status.link_speed = ETH_SPEED_NUM_200G;
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_AUTONEG;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #4:len = %d, %s\n", ret, text);
 	RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@@ -69,10 +69,10 @@ test_link_status_down_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -90,9 +90,9 @@ test_link_status_invalid(void)
 	int ret = 0;
 	struct rte_eth_link link_status = {
 		.link_speed = 55555,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -116,21 +116,21 @@ test_link_speed_all_values(void)
 		const char *value;
 		uint32_t link_speed;
 	} speed_str_map[] = {
-		{ "None",   ETH_SPEED_NUM_NONE },
-		{ "10 Mbps",  ETH_SPEED_NUM_10M },
-		{ "100 Mbps", ETH_SPEED_NUM_100M },
-		{ "1 Gbps",   ETH_SPEED_NUM_1G },
-		{ "2.5 Gbps", ETH_SPEED_NUM_2_5G },
-		{ "5 Gbps",   ETH_SPEED_NUM_5G },
-		{ "10 Gbps",  ETH_SPEED_NUM_10G },
-		{ "20 Gbps",  ETH_SPEED_NUM_20G },
-		{ "25 Gbps",  ETH_SPEED_NUM_25G },
-		{ "40 Gbps",  ETH_SPEED_NUM_40G },
-		{ "50 Gbps",  ETH_SPEED_NUM_50G },
-		{ "56 Gbps",  ETH_SPEED_NUM_56G },
-		{ "100 Gbps", ETH_SPEED_NUM_100G },
-		{ "200 Gbps", ETH_SPEED_NUM_200G },
-		{ "Unknown",  ETH_SPEED_NUM_UNKNOWN },
+		{ "None",   RTE_ETH_SPEED_NUM_NONE },
+		{ "10 Mbps",  RTE_ETH_SPEED_NUM_10M },
+		{ "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+		{ "1 Gbps",   RTE_ETH_SPEED_NUM_1G },
+		{ "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+		{ "5 Gbps",   RTE_ETH_SPEED_NUM_5G },
+		{ "10 Gbps",  RTE_ETH_SPEED_NUM_10G },
+		{ "20 Gbps",  RTE_ETH_SPEED_NUM_20G },
+		{ "25 Gbps",  RTE_ETH_SPEED_NUM_25G },
+		{ "40 Gbps",  RTE_ETH_SPEED_NUM_40G },
+		{ "50 Gbps",  RTE_ETH_SPEED_NUM_50G },
+		{ "56 Gbps",  RTE_ETH_SPEED_NUM_56G },
+		{ "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+		{ "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+		{ "Unknown",  RTE_ETH_SPEED_NUM_UNKNOWN },
 		{ "Invalid",   50505 }
 	};
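
The table above drives rte_eth_link_to_str() through every defined
speed; typical application usage is closer to this sketch (port_id is
assumed valid):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Print the current link state with the renamed constants. */
    static void
    print_link(uint16_t port_id)
    {
        struct rte_eth_link link;
        char text[RTE_ETH_LINK_MAX_STR_LEN];

        if (rte_eth_link_get_nowait(port_id, &link) < 0)
            return;
        rte_eth_link_to_str(text, sizeof(text), &link);
        printf("Port %u: %s (%s)\n", port_id, text,
               link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
               "full-duplex" : "half-duplex");
    }
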
 
diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 9198767b4194..bb7917010d62 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -106,7 +106,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 		.intr_conf = {
 			.rxq = 1,
@@ -121,7 +121,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 	};
 
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index 96733554b6c4..40ab0d5c4ca4 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
 
 static const struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 8a5c8310a8b4..23c024aa1b0c 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -134,12 +134,12 @@ static uint16_t vlan_id = 0x100;
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 2c835fa7adc7..1556f14d6921 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -107,12 +107,12 @@ static struct link_bonding_unittest_params test_params  = {
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index 5dac60ca1edd..93caaf986c2f 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -80,29 +80,29 @@ static struct link_bonding_rssconf_unittest_params test_params  = {
  */
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
 static struct rte_eth_conf rss_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IPV6,
+			.rss_hf = RTE_ETH_RSS_IPV6,
 		},
 	},
 	.lpbk_mode = 0,
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 3a248d512c4a..da7b7ad1f7cc 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -62,12 +62,12 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 1,  /* enable loopback */
 };
@@ -156,7 +156,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -823,7 +823,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		/* bulk alloc rx, full-featured tx */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "hybrid")) {
 		/* bulk alloc rx, vector tx
@@ -832,13 +832,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		 */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "full")) {
 		/* full feature rx,tx pair */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		return 0;
 	}
 
diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7036f401ed95..6eecfa385537 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -53,7 +53,7 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
 		rte_pktmbuf_free(pkt);
@@ -178,7 +178,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
 		int wait_to_complete __rte_unused)
 {
 	if (!bonded_eth_dev->data->dev_started)
-		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -574,9 +574,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 	eth_dev->data->nb_rx_queues = (uint16_t)1;
 	eth_dev->data->nb_tx_queues = (uint16_t)1;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL)
diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 53560d3830d7..1c0ea988f239 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue configuration.
 - HW managed event vectorization on CN10K for packets enqueued from ethdev to
diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index 11fbebfcd243..0fa57abfa3e0 100644
--- a/doc/guides/eventdevs/octeontx2.rst
+++ b/doc/guides/eventdevs/octeontx2.rst
@@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue config.
 
diff --git a/doc/guides/howto/debug_troubleshoot.rst b/doc/guides/howto/debug_troubleshoot.rst
index 457ac441429a..13f30e39363e 100644
--- a/doc/guides/howto/debug_troubleshoot.rst
+++ b/doc/guides/howto/debug_troubleshoot.rst
@@ -71,7 +71,7 @@ RX Port and associated core :numref:`dtg_rx_rate`.
    * Identify if port Speed and Duplex is matching to desired values with
      ``rte_eth_link_get``.
 
-   * Check ``DEV_RX_OFFLOAD_JUMBO_FRAME`` is set with ``rte_eth_dev_info_get``.
+   * Check ``RTE_ETH_RX_OFFLOAD_JUMBO_FRAME`` is set with ``rte_eth_dev_info_get``.
 
    * Check promiscuous mode if the drops do not occur for unique MAC address
      with ``rte_eth_promiscuous_get``.
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index e75f4fa9e3bc..77827e750195 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -877,22 +877,22 @@ processing. This improved performance is derived from a number of optimizations:
     * TX: only the following reduced set of transmit offloads is supported in
       vector mode::
 
-       DEV_TX_OFFLOAD_MBUF_FAST_FREE
+       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 
     * RX: only the following reduced set of receive offloads is supported in
       vector mode (note that jumbo MTU is allowed only when the MTU setting
-      does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
-       DEV_RX_OFFLOAD_VLAN_STRIP
-       DEV_RX_OFFLOAD_KEEP_CRC
-       DEV_RX_OFFLOAD_JUMBO_FRAME
-       DEV_RX_OFFLOAD_IPV4_CKSUM
-       DEV_RX_OFFLOAD_UDP_CKSUM
-       DEV_RX_OFFLOAD_TCP_CKSUM
-       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-       DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-       DEV_RX_OFFLOAD_RSS_HASH
-       DEV_RX_OFFLOAD_VLAN_FILTER
+      does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+       RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+       RTE_ETH_RX_OFFLOAD_KEEP_CRC
+       RTE_ETH_RX_OFFLOAD_JUMBO_FRAME
+       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_RSS_HASH
+       RTE_ETH_RX_OFFLOAD_VLAN_FILTER
 
 The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
 vector processing is made at run-time when the port is started; if no transmit
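
To stay on the bnxt vector path, a configuration would pick offloads
only from the lists above; a sketch under that assumption:

    #include <rte_ethdev.h>

    /* Port configuration kept inside the bnxt vector-mode offload
     * set, using the names as renamed by this patch. */
    static const struct rte_eth_conf bnxt_vec_conf = {
        .rxmode = {
            .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                        RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                        RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
                        RTE_ETH_RX_OFFLOAD_RSS_HASH,
        },
        .txmode = {
            .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
        },
    };
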
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 91bdcd065a95..0209730b904a 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -432,7 +432,7 @@ Limitations
 .. code-block:: console
 
      vlan_offload = rte_eth_dev_get_vlan_offload(port);
-     vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+     vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
 Another alternative is modify the adapter's ingress VLAN rewrite mode so that
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index a96e12d15515..7f7d6ae45658 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -30,7 +30,7 @@ Speed capabilities
 
 Supports getting the speed capabilities that the current device is capable of.
 
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
 * **[related]  API**: ``rte_eth_dev_info_get()``.
 
 
@@ -101,11 +101,11 @@ Supports Rx interrupts.
 Lock-free Tx queue
 ------------------
 
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises the RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capability, multiple threads can
 invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
 
-* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
 * **[related]  API**: ``rte_eth_tx_burst()``.
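
A sketch of the corresponding capability probe (the port id is a
placeholder) could look like::

    #include <stdbool.h>
    #include <rte_ethdev.h>

    struct rte_eth_dev_info dev_info;
    bool tx_lockfree = false;

    /* Only skip the per-queue Tx lock when the PMD reports MT_LOCKFREE. */
    if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
        tx_lockfree = (dev_info.tx_offload_capa &
                       RTE_ETH_TX_OFFLOAD_MT_LOCKFREE) != 0;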
 
 
@@ -117,8 +117,8 @@ Fast mbuf free
 Supports optimization for fast release of mbufs following successful Tx.
 Requires that, per queue, all mbufs come from the same mempool and have refcnt = 1.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
 
 
 .. _nic_features_free_tx_mbuf_on_demand:
@@ -165,7 +165,7 @@ Jumbo frame
 
 Supports Rx jumbo frames.
 
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_JUMBO_FRAME``.
   ``dev_conf.rxmode.max_rx_pkt_len``.
 * **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
 * **[related] API**: ``rte_eth_dev_set_mtu()``.
@@ -178,7 +178,7 @@ Scattered Rx
 
 Supports receiving segmented mbufs.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
 * **[implements] datapath**: ``Scattered Rx function``.
 * **[implements] rte_eth_dev_data**: ``scattered_rx``.
 * **[provides]   eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -206,12 +206,12 @@ LRO
 
 Supports Large Receive Offload.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
   ``dev_conf.rxmode.max_lro_pkt_size``.
 * **[implements] datapath**: ``LRO functionality``.
 * **[implements] rte_eth_dev_data**: ``lro``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
 * **[provides]   rte_eth_dev_info**: ``max_lro_pkt_size``.
 
 
@@ -222,12 +222,12 @@ TSO
 
 Supports TCP Segmentation Offloading.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
 * **[uses]       rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
 * **[uses]       mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
 * **[implements] datapath**: ``TSO functionality``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
 
 
 .. _nic_features_promiscuous_mode:
@@ -288,9 +288,9 @@ RSS hash
 
 Supports RSS hashing on RX.
 
-* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
 * **[uses]     user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
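
Combined, the renamed flags above appear in an application's port
configuration roughly as follows (a sketch; the port id, queue counts and
hash-type selection are illustrative)::

    #include <rte_ethdev.h>

    struct rte_eth_conf port_conf = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_RSS,
            .offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH,
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL,    /* use the PMD default key */
                .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
            },
        },
    };

    int ret = rte_eth_dev_configure(port_id, 4, 4, &port_conf);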
 
@@ -303,7 +303,7 @@ Inner RSS
 Supports RX RSS hashing on Inner headers.
 
 * **[uses]    rte_flow_action_rss**: ``level``.
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
 
@@ -340,7 +340,7 @@ VMDq
 
 Supports Virtual Machine Device Queues (VMDq).
 
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
 * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -363,7 +363,7 @@ DCB
 
 Supports Data Center Bridging (DCB).
 
-* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
 * **[uses]       user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -379,7 +379,7 @@ VLAN filter
 
 Supports filtering of a VLAN Tag identifier.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
 * **[implements] eth_dev_ops**: ``vlan_filter_set``.
 * **[related]    API**: ``rte_eth_dev_vlan_filter()``.
 
@@ -428,12 +428,12 @@ Supports inline crypto processing defined by rte_security library to perform cry
 operations of the security protocol while the packet is received in the NIC. The NIC is not aware
 of the protocol operations. See the Security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@@ -449,13 +449,13 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
 packet is received at the NIC. The NIC is capable of understanding the security
 protocol operations. See the security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
   ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@@ -469,7 +469,7 @@ CRC offload
 Supports CRC stripping by hardware.
 A PMD is assumed to support CRC stripping by default. A PMD should advertise whether it supports keeping the CRC.
 
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
 
 
 .. _nic_features_vlan_offload:
@@ -479,13 +479,13 @@ VLAN offload
 
 Supports VLAN offload to hardware.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
 * **[implements] eth_dev_ops**: ``vlan_offload_set``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[related]    API**: ``rte_eth_dev_set_vlan_offload()``,
   ``rte_eth_dev_get_vlan_offload()``.
 
@@ -497,14 +497,14 @@ QinQ offload
 
 Supports QinQ (queue in queue) offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
   ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
   ``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 
 
 .. _nic_features_fec:
@@ -518,7 +518,7 @@ information to correct the bit errors generated during data packet transmission
 improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
 
 * **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides]   rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides]   rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
 * **[related]    API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
 
 
@@ -529,16 +529,16 @@ L3 checksum offload
 
 Supports L3 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
 * **[uses]     mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
   ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
   ``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 
 
 .. _nic_features_l4_checksum_offload:
@@ -548,8 +548,8 @@ L4 checksum offload
 
 Supports L4 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
   ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -557,8 +557,8 @@ Supports L4 checksum offload.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
   ``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 
 .. _nic_features_hw_timestamp:
 
@@ -567,10 +567,10 @@ Timestamp offload
 
 Supports Timestamp.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[related] eth_dev_ops**: ``read_clock``.
 
 .. _nic_features_macsec_offload:
@@ -580,11 +580,11 @@ MACsec offload
 
 Supports MACsec.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 
 
 .. _nic_features_inner_l3_checksum:
@@ -594,16 +594,16 @@ Inner L3 checksum
 
 Supports inner packet L3 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 
 
 .. _nic_features_inner_l4_checksum:
@@ -613,15 +613,15 @@ Inner L4 checksum
 
 Supports inner packet L4 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
   ``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 
 
 .. _nic_features_packet_type_parsing:
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index 7b8ef0e7823d..3dff65d89b6d 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
 To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
 will be checked:
 
-*   ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+*   ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
 
-*   ``DEV_RX_OFFLOAD_CHECKSUM``
+*   ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
 
-*   ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+*   ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
 
 *   ``fdir_conf->mode``
 
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index fcea8151bf3c..e60e3b2a761d 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -222,21 +222,21 @@ For example,
     *   If the max number of VFs (max_vfs) is set in the range of 1 to 32:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are a total of 32
-        pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
 
         If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are a total of 32
-        pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
 
     *   If the max number of VFs (max_vfs) is in the range of 33 to 64:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected
         as ``rxq`` is not correct in this case;
 
-        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are a total of 64 pools (RTE_ETH_64_POOLS),
         and each VF has 2 Rx queues;
 
-    On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
-    or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+    On the host, to enable VF RSS functionality, the Rx mq mode should be set to RTE_ETH_MQ_RX_VMDQ_RSS
+    or RTE_ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
     It is also necessary to configure the VF RSS information, such as hash function, RSS key and RSS key length.
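
A host-side configuration sketch matching the above (the key, key length
and hash types are illustrative assumptions)::

    #include <rte_ethdev.h>

    static uint8_t rss_key[40];     /* filled in by the application */

    struct rte_eth_conf port_conf = {
        .rxmode = { .mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = rss_key,
                .rss_key_len = sizeof(rss_key),
                .rss_hf = RTE_ETH_RSS_IP,
            },
        },
    };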
 
 .. note::
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index b82e63438285..24fbccc982f5 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -69,13 +69,13 @@ Other features are supported using optional MACRO configuration. They include:
 
 To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
 
-*   DEV_RX_OFFLOAD_VLAN_STRIP
+*   RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 
-*   DEV_RX_OFFLOAD_VLAN_EXTEND
+*   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
 
-*   DEV_RX_OFFLOAD_CHECKSUM
+*   RTE_ETH_RX_OFFLOAD_CHECKSUM
 
-*   DEV_RX_OFFLOAD_HEADER_SPLIT
+*   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
 
 *   dev_conf
 
@@ -143,13 +143,13 @@ l3fwd
 ~~~~~
 
 When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
 Otherwise, by default, RX vPMD is disabled.
 
 load_balancer
 ~~~~~~~~~~~~~
 
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
 In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
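
In code, keeping the vector path enabled amounts to clearing the composite
checksum flag before configuring the port (a sketch; ``port_conf`` is the
application's configuration)::

    #include <rte_ethdev.h>

    /* RTE_ETH_RX_OFFLOAD_CHECKSUM combines the IPv4, UDP and TCP
     * checksum offloads; none of them may be requested for Rx vPMD. */
    port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_CHECKSUM;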
 
 
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index bae73f42d882..6facb68b9545 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -371,7 +371,7 @@ Limitations
 
 - CRC:
 
-  - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+  - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
@@ -607,7 +607,7 @@ Driver options
   small-packet traffic.
 
   When MPRQ is enabled, max_rx_pkt_len can be larger than the size of
-  user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+  user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. The PMD will
   configure a stride size large enough to accommodate max_rx_pkt_len as long as the
   device allows. Note that this can waste system memory compared to enabling Rx
   scatter and multi-segment packet.
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 3ce696b605d1..681010d9ed7d 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -275,7 +275,7 @@ An example utility for eBPF instruction generation in the format of C arrays wil
 be added in future releases.
 
 TAP reports the supported RSS functions as part of the dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
 **Known limitation:** TAP supports all of the above hash functions together
 and not in partial combinations.
 
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 7bff0aef0b74..9b2c31a2f0bc 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
 
    - the bit mask of required GSO types. The GSO library uses the same macros as
      those that describe a physical device's TX offloading capabilities (i.e.
-     ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+     ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
      wants to segment TCP/IPv4 packets, it should set gso_types to
-     ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
-     supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
-     ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+     ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+     supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+     ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
      allowed.
 
    - a flag that indicates whether the IPv4 headers of output segments should
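
A GSO context set up for TCP/IPv4 segmentation could look as follows (a
sketch; the two mempools and the segment size are application-provided
assumptions)::

    #include <rte_gso.h>

    struct rte_gso_ctx gso_ctx = {
        .direct_pool = direct_pool,         /* created by the application */
        .indirect_pool = indirect_pool,     /* created by the application */
        .gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO,
        .gso_size = 1460,                   /* application-chosen MSS */
        .flag = 0,          /* IPv4 IDs increase per segment by default */
    };
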
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 2f190b40e43a..dc6186a44ae2 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
     mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM
     set out_ip checksum to 0 in the packet
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
 
 - calculate checksum of out_ip and out_udp::
 
@@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
     set out_ip checksum to 0 in the packet
     set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
-  and DEV_TX_OFFLOAD_UDP_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+  and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
 
 - calculate checksum of in_ip::
 
@@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
 
   This is similar to case 1), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of in_ip and in_tcp::
@@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
   This is similar to case 2), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
-  DEV_TX_OFFLOAD_TCP_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+  RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - segment inner TCP::
@@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header without including the IP
       payload length using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of out_ip, in_ip, in_tcp::
@@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
-  DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+  RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
 
 The list of flags and their precise meaning is described in the mbuf API
 documentation (rte_mbuf.h). Also refer to the testpmd source code
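
As a compact sketch of the first case above (``m`` is the mbuf under
construction and ``ipv4_hdr`` points at out_ip inside its data; both are
assumptions of this example)::

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>

    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    ipv4_hdr->hdr_checksum = 0;     /* the hardware computes it */
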
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 0d4ac77a7ccf..68312898448c 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
 
 Avoiding lock contention is a key issue in a multi-core environment.
 To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
 In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
 
 To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
 
 Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
 
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
 concurrently on the same Tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
 
 *  Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
@@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
 *  In the eventdev use case, avoid dedicating a separate TX core for transmitting, thus
    enabling better scaling as all workers can send the packets.
 
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
 
 Device Identification, Ownership and Configuration
 --------------------------------------------------
@@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
 The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
 Any requested offloading by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
 ``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
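
As a sketch of the two levels (the port id, descriptor count and ``mb_pool``
are illustrative), a per-port offload is requested at configure time and a
per-queue one at queue setup::

    #include <rte_ethdev.h>
    #include <rte_lcore.h>

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf = {
        .rxmode = { .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM }, /* per-port */
    };
    struct rte_eth_rxconf rxconf;

    rte_eth_dev_info_get(port_id, &dev_info);
    rte_eth_dev_configure(port_id, 1, 1, &conf);

    rxconf = dev_info.default_rxconf;
    rxconf.offloads = RTE_ETH_RX_OFFLOAD_SCATTER;              /* per-queue */
    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                           &rxconf, mb_pool);
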
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 2b42d5ec8c05..1bac8f04b96e 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1835,23 +1835,23 @@ only matching traffic goes through.
 
 .. table:: RSS
 
-   +---------------+---------------------------------------------+
-   | Field         | Value                                       |
-   +===============+=============================================+
-   | ``func``      | RSS hash function to apply                  |
-   +---------------+---------------------------------------------+
-   | ``level``     | encapsulation level for ``types``           |
-   +---------------+---------------------------------------------+
-   | ``types``     | specific RSS hash types (see ``ETH_RSS_*``) |
-   +---------------+---------------------------------------------+
-   | ``key_len``   | hash key length in bytes                    |
-   +---------------+---------------------------------------------+
-   | ``queue_num`` | number of entries in ``queue``              |
-   +---------------+---------------------------------------------+
-   | ``key``       | hash key                                    |
-   +---------------+---------------------------------------------+
-   | ``queue``     | queue indices to use                        |
-   +---------------+---------------------------------------------+
+   +---------------+-------------------------------------------------+
+   | Field         | Value                                           |
+   +===============+=================================================+
+   | ``func``      | RSS hash function to apply                      |
+   +---------------+-------------------------------------------------+
+   | ``level``     | encapsulation level for ``types``               |
+   +---------------+-------------------------------------------------+
+   | ``types``     | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+   +---------------+-------------------------------------------------+
+   | ``key_len``   | hash key length in bytes                        |
+   +---------------+-------------------------------------------------+
+   | ``queue_num`` | number of entries in ``queue``                  |
+   +---------------+-------------------------------------------------+
+   | ``key``       | hash key                                        |
+   +---------------+-------------------------------------------------+
+   | ``queue``     | queue indices to use                            |
+   +---------------+-------------------------------------------------+
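
Filled in with the renamed ``RTE_ETH_RSS_*`` types, the action could be
initialized as below (a sketch; the queue list and hash types are
application-chosen)::

    #include <rte_common.h>
    #include <rte_flow.h>

    static const uint16_t queues[] = { 0, 1, 2, 3 };

    struct rte_flow_action_rss rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0,                 /* hash on the outermost headers */
        .types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
        .key_len = 0,               /* use the PMD default key */
        .key = NULL,
        .queue_num = RTE_DIM(queues),
        .queue = queues,
    };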
 
 Action: ``PF``
 ^^^^^^^^^^^^^^
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index f72bc8a78fa6..e3bd451917f0 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -560,7 +560,7 @@ created by the application is attached to the security session by the API
 
 For Inline Crypto and Inline protocol offload, device specific defined metadata is
 updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
 
 For inline protocol offloaded ingress traffic, the application can register a
 pointer, ``userdata`` , in the security session. When the packet is received,
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 76a4abfd6b0b..20159a1c9a90 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -58,22 +58,16 @@ Deprecation Notices
   ``RTE_ETH_FLOW_MAX`` is one sample of the mentioned case, adding a new flow
   type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
   usage in following public struct hierarchy:
-  ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+  ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
   Need to identify this kind of usages and fix in 20.11, otherwise this blocks
   us extending existing enum/define.
   One solution can be using a fixed size array instead of ``.*MAX.*`` value.
 
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
-  Macros will be added for backward compatibility.
-  Backward compatibility macros will be removed on v22.11.
-  A few old backward compatibility macros from 2013 that does not have
-  proper prefix will be removed on v21.11.
-
 * ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
   and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
   will be removed in DPDK 20.11.
 
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
   This will allow application to enable or disable PMDs from updating
   ``rte_mbuf::hash::fdir``.
   This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
@@ -98,7 +92,7 @@ Deprecation Notices
   either by ``rte_eth_dev_configure()`` or ``rte_eth_dev_set_mtu()``.
 
   An application may need to configure the device for a specific Rx packet size, such as for
-  cases ``DEV_RX_OFFLOAD_SCATTER`` is not supported and device received packet size
+  cases where ``RTE_ETH_RX_OFFLOAD_SCATTER`` is not supported and the received packet size
   can't be bigger than the Rx buffer size.
   To cover these cases an application needs to know the device packet overhead to be
   able to calculate the ``mtu`` corresponding to a Rx buffer size, for this
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 78171b25f96e..782574dd39d5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -209,12 +209,12 @@ Where:
     device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
 
 *   ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
     allows the user to disable some of the RX HW offload capabilities.
     By default all HW RX offloads are enabled.
 
 *   ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
     allows the user to disable some of the TX HW offload capabilities.
     By default all HW TX offloads are enabled.
 
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 6061674239f4..d7f5951d4639 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -526,7 +526,7 @@ The command line options are:
     Set the hexadecimal bitmask of RX multi queue mode which can be enabled.
     The default value is 0x7::
 
-       ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+       RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
 
 *   ``--record-core-cycles``
 
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
index be52e6f72dab..a922988607ef 100644
--- a/drivers/bus/dpaa/include/process.h
+++ b/drivers/bus/dpaa/include/process.h
@@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
 struct usdpaa_ioctl_link_status_args_old {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
-	/* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+	/* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
 	int     link_autoneg;
 
 };
@@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
 struct usdpaa_ioctl_update_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_update_link_speed {
 	/* network device node name*/
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
 };
 
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index bab25fd72eee..360bf75d3861 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -153,7 +153,7 @@ enum roc_npc_rss_hash_function {
 struct roc_npc_action_rss {
 	enum roc_npc_rss_hash_function func;
 	uint32_t level;
-	uint64_t types;	       /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types;	       /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len;      /**< Hash key length in bytes. */
 	uint32_t queue_num;    /**< Number of entries in @p queue. */
 	const uint8_t *key;    /**< Hash key. */
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index b73b211fd249..fb5d549e6227 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -91,10 +91,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@@ -265,7 +265,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -295,7 +295,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		internals->tx_queue[i].sockfd = -1;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -316,8 +316,8 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
 	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	return 0;
 }
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 74ffa4511284..dbf745852716 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -163,10 +163,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -654,7 +654,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -663,7 +663,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 377299b14c7a..b618cba3f023 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
 		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
 
 	/* ARK PMD supports all line rates, how do we indicate that here ?? */
-	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
-				ETH_LINK_SPEED_10G |
-				ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G);
-
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+	dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+				RTE_ETH_LINK_SPEED_10G |
+				RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G);
+
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return 0;
 }
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 0ce35eb519e2..5af1cff3770e 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -154,21 +154,21 @@ static struct rte_pci_driver rte_atl_pmd = {
 	.remove = eth_atl_pci_remove,
 };
 
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
-			| DEV_RX_OFFLOAD_IPV4_CKSUM \
-			| DEV_RX_OFFLOAD_UDP_CKSUM \
-			| DEV_RX_OFFLOAD_TCP_CKSUM \
-			| DEV_RX_OFFLOAD_JUMBO_FRAME \
-			| DEV_RX_OFFLOAD_MACSEC_STRIP \
-			| DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
-			| DEV_TX_OFFLOAD_IPV4_CKSUM \
-			| DEV_TX_OFFLOAD_UDP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_TSO \
-			| DEV_TX_OFFLOAD_MACSEC_INSERT \
-			| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_JUMBO_FRAME \
+			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
+			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define SFP_EEPROM_SIZE 0x100
 
@@ -489,7 +489,7 @@ atl_dev_start(struct rte_eth_dev *dev)
 	/* set adapter started */
 	hw->adapter_stopped = 0;
 
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -656,18 +656,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
 	uint32_t speed_mask = 0;
 
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
 	} else {
-		if (link_speeds & ETH_LINK_SPEED_10G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed_mask |= AQ_NIC_RATE_10G;
-		if (link_speeds & ETH_LINK_SPEED_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed_mask |= AQ_NIC_RATE_5G;
-		if (link_speeds & ETH_LINK_SPEED_1G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed_mask |= AQ_NIC_RATE_1G;
-		if (link_speeds & ETH_LINK_SPEED_2_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed_mask |=  AQ_NIC_RATE_2G5;
-		if (link_speeds & ETH_LINK_SPEED_100M)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed_mask |= AQ_NIC_RATE_100M;
 	}
 
@@ -1128,10 +1128,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
-	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 
 	return 0;
 }
@@ -1176,10 +1176,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 	u32 fc = AQ_NIC_FC_OFF;
 	int err = 0;
 
-	link.link_status = ETH_LINK_DOWN;
+	link.link_status = RTE_ETH_LINK_DOWN;
 	link.link_speed = 0;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 	memset(&old, 0, sizeof(old));
 
 	/* load old link status */
@@ -1199,8 +1199,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 		return 0;
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = hw->aq_link_status.mbps;
 
 	rte_eth_linkstatus_set(dev, &link);
@@ -1334,7 +1334,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1533,13 +1533,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	hw->aq_fw_ops->get_flow_control(hw, &fc);
 
 	if (fc == AQ_NIC_FC_OFF)
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (fc & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (fc & AQ_NIC_FC_TX)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 
 	return 0;
 }
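
On the application side, the renamed modes are used with the generic flow
control API, e.g. (a sketch; the port id is a placeholder)::

    #include <rte_ethdev.h>

    struct rte_eth_fc_conf fc_conf;

    if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
        fc_conf.mode = RTE_ETH_FC_FULL;     /* pause frames both ways */
        rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    }
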
@@ -1554,13 +1554,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (hw->aq_fw_ops->set_flow_control == NULL)
 		return -ENOTSUP;
 
-	if (fc_conf->mode == RTE_FC_NONE)
+	if (fc_conf->mode == RTE_ETH_FC_NONE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
-	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
-	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
-	else if (fc_conf->mode == RTE_FC_FULL)
+	else if (fc_conf->mode == RTE_ETH_FC_FULL)
 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
 
 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1731,14 +1731,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
 
-	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
 
-	if (mask & ETH_VLAN_EXTEND_MASK)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
 		ret = -ENOTSUP;
 
 	return ret;
@@ -1754,10 +1754,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 	PMD_INIT_FUNC_TRACE();
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
 		break;
 	default:
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
index f547571b5c97..da993be35faa 100644
--- a/drivers/net/atlantic/atl_ethdev.h
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -11,15 +11,15 @@
 #include "hw_atl/hw_atl_utils.h"
 
 #define ATL_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define ATL_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct atl_adapter *)adapter)->hw)
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 7d367c9306ec..ddf110d6ce7e 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
 	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_IPV4_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
 	/* allocate memory for the software ring */
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 623fa5e5ff5b..e870ced7e992 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -2011,9 +2011,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	/* Setup required number of queues */
 	_avp_set_queue_counts(eth_dev);
 
-	mask = (ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+	mask = (RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 	ret = avp_vlan_offload_set(eth_dev, mask);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2153,8 +2153,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
-	link->link_speed = ETH_SPEED_NUM_10G;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_speed = RTE_ETH_SPEED_NUM_10G;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_status = !!(avp->flags & AVP_F_LINKUP);
 
 	return -1;
@@ -2204,8 +2204,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
 	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
 	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	}
 
 	return 0;
@@ -2218,9 +2218,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	uint64_t offloads = dev_conf->rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2229,13 +2229,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
 
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 786288a7b079..c0f033e06b15 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
 	pdata->rss_hf = rss_conf->rss_hf;
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 }
 
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 9cb4818af11f..d4ba06c43a61 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
 	struct axgbe_port *pdata =  dev->data->dev_private;
 	/* Checksum offload to hardware */
 	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_CHECKSUM;
+				RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return 0;
 }
 
@@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		pdata->rss_enable = 1;
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		pdata->rss_enable = 0;
 	else
 		return  -1;
@@ -383,7 +383,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 
 	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
 	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 				max_pkt_len > pdata->rx_buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -588,13 +588,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
 
-	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Set the RSS options */
@@ -763,7 +763,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
 	link.link_status = pdata->phy_link;
 	link.link_speed = pdata->phy_speed;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
 		PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1206,25 +1206,25 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
 	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
 	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
-	dev_info->speed_capa =  ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_JUMBO_FRAME	|
-		DEV_RX_OFFLOAD_SCATTER	  |
-		DEV_RX_OFFLOAD_KEEP_CRC;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
 		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1261,13 +1261,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	fc.autoneg = pdata->pause_autoneg;
 
 	if (pdata->rx_pause && pdata->tx_pause)
-		fc.mode = RTE_FC_FULL;
+		fc.mode = RTE_ETH_FC_FULL;
 	else if (pdata->rx_pause)
-		fc.mode = RTE_FC_RX_PAUSE;
+		fc.mode = RTE_ETH_FC_RX_PAUSE;
 	else if (pdata->tx_pause)
-		fc.mode = RTE_FC_TX_PAUSE;
+		fc.mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc.mode = RTE_FC_NONE;
+		fc.mode = RTE_ETH_FC_NONE;
 
 	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
 	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1297,13 +1297,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	AXGMAC_IOWRITE(pdata, reg, reg_val);
 	fc.mode = fc_conf->mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 	} else {
@@ -1385,15 +1385,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	fc.mode = pfc_conf->fc.mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1492,11 +1492,11 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 	if (frame_size > AXGBE_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		val = 1;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		val = 0;
 	}
 	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
@@ -1842,8 +1842,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+	case RTE_ETH_VLAN_TYPE_INNER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
 		if (qinq) {
 			if (tpid != 0x8100 && tpid != 0x88a8)
 				PMD_DRV_LOG(ERR,
@@ -1860,8 +1860,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    "Inner type not supported in single tag\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
 		if (qinq) {
 			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
 			/*Enable outer VLAN tag*/
@@ -1878,11 +1878,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 					    "tag supported 0x8100/0x88A8\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_MAX:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+	case RTE_ETH_VLAN_TYPE_MAX:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
 		break;
-	case ETH_VLAN_TYPE_UNKNOWN:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+	case RTE_ETH_VLAN_TYPE_UNKNOWN:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
 		break;
 	}
 	return 0;
@@ -1916,8 +1916,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_stripping(pdata);
@@ -1927,8 +1927,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_stripping(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_filtering(pdata);
@@ -1938,14 +1938,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_filtering(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
 			axgbe_vlan_extend_enable(pdata);
 			/* Set global registers with default ethertype*/
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					    RTE_ETHER_TYPE_VLAN);
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					    RTE_ETHER_TYPE_VLAN);
 		} else {
 			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index a6226729fe4d..0a3e1c59df1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -97,12 +97,12 @@
 
 /* Receive Side Scaling */
 #define AXGBE_RSS_OFFLOAD  ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define AXGBE_RSS_HASH_KEY_SIZE		40
 #define AXGBE_RSS_MAX_TABLE_SIZE	256
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
index 4f98e695ae74..59fa9175aded 100644
--- a/drivers/net/axgbe/axgbe_mdio.c
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
 		pdata->an_int = 0;
 		axgbe_an73_clear_interrupts(pdata);
 		pdata->eth_dev->data->dev_link.link_status =
-			ETH_LINK_DOWN;
+			RTE_ETH_LINK_DOWN;
 	} else if (pdata->an_state == AXGBE_AN_ERROR) {
 		PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
 			    cur_state);
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 33f709a6bb02..baa17a5fb43f 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		(DMA_CH_INC * rxq->queue_id));
 	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
 						  DMA_CH_RDTR_LO);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 463886f17a58..14d91f868cd8 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
 	link.link_speed = sc->link_vars.line_speed;
 	switch (sc->link_vars.duplex) {
 		case DUPLEX_FULL:
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			break;
 		case DUPLEX_HALF:
-			link.link_duplex = ETH_LINK_HALF_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 			break;
 	}
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	link.link_status = sc->link_vars.link_up;
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -181,7 +181,7 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE(sc);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 		dev->data->mtu = sc->mtu;
 	}
@@ -412,7 +412,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
 		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
 				"VF device is no longer operational");
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	}
 
 	return ret;
@@ -538,8 +538,8 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
 	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -675,7 +675,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 	bnx2x_load_firmware(sc);
 	assert(sc->firmware);
 
-	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		sc->udp_rss = 1;
 
 	sc->rx_budget = BNX2X_RX_BUDGET;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 494a1eff3700..7e313c2fb5af 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -569,40 +569,40 @@ struct bnxt_rep_info {
 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
 
 #define BNXT_ETH_RSS_SUPPORT (	\
-	ETH_RSS_IPV4 |		\
-	ETH_RSS_NONFRAG_IPV4_TCP |	\
-	ETH_RSS_NONFRAG_IPV4_UDP |	\
-	ETH_RSS_IPV6 |		\
-	ETH_RSS_NONFRAG_IPV6_TCP |	\
-	ETH_RSS_NONFRAG_IPV6_UDP |	\
-	ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
-				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_CKSUM | \
-				     DEV_TX_OFFLOAD_UDP_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_TSO | \
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_QINQ_INSERT | \
-				     DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
-				     DEV_RX_OFFLOAD_VLAN_STRIP | \
-				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_TCP_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
-				     DEV_RX_OFFLOAD_KEEP_CRC | \
-				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-				     DEV_RX_OFFLOAD_TCP_LRO | \
-				     DEV_RX_OFFLOAD_SCATTER | \
-				     DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RSS_IPV4 |		\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
+	RTE_ETH_RSS_IPV6 |		\
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
+	RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+				     RTE_ETH_RX_OFFLOAD_SCATTER | \
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index de34a2f0bb2d..99d4953305e3 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 		goto err_out;
 
 	/* Alloc RSS context only if RSS mode is enabled */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
 
 		/* RSS table size in Thor is 512.
@@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	 * setting is not available at this time, it will not be
 	 * configured correctly in the CFA.
 	 */
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 
 	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
-				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
 				    true : false);
 	if (rc)
 		goto err_out;
@@ -738,11 +738,11 @@ static int bnxt_start_nic(struct bnxt *bp)
 
 	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags |= BNXT_FLAG_JUMBO;
 	} else {
 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags &= ~BNXT_FLAG_JUMBO;
 	}
 
@@ -908,35 +908,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
 		link_speed = bp->link_info->support_pam4_speeds;
 
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
-		speed_capa |= ETH_LINK_SPEED_2_5G;
+		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
-		speed_capa |= ETH_LINK_SPEED_20G;
+		speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	if (bp->link_info->auto_mode ==
 	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -980,8 +980,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
 				    dev_info->tx_queue_offload_capa;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
@@ -1030,8 +1030,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	 */
 
 	/* VMDq resources */
-	vpool = 64; /* ETH_64_POOLS */
-	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	vpool = 64; /* RTE_ETH_64_POOLS */
+	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
 	for (i = 0; i < 4; vpool >>= 1, i++) {
 		if (max_vnics > vpool) {
 			for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1126,18 +1126,18 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
 		goto resource_error;
 
-	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
 		goto resource_error;
 
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		eth_dev->data->mtu =
 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
 			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
@@ -1168,7 +1168,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
 			eth_dev->data->port_id,
 			(uint32_t)link->link_speed,
-			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			("full-duplex") : ("half-duplex\n"));
 	else
 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1184,10 +1184,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return 1;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		return 1;
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1232,16 +1232,16 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 	 * a limited subset have been enabled.
 	 */
 	if (eth_dev->data->dev_conf.rxmode.offloads &
-		~(DEV_RX_OFFLOAD_VLAN_STRIP |
-		  DEV_RX_OFFLOAD_KEEP_CRC |
-		  DEV_RX_OFFLOAD_JUMBO_FRAME |
-		  DEV_RX_OFFLOAD_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_TCP_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_RSS_HASH |
-		  DEV_RX_OFFLOAD_VLAN_FILTER))
+		~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		  RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		  RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
 		goto use_scalar_rx;
 
 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1293,7 +1293,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 	 * or tx offloads.
 	 */
 	if (eth_dev->data->scattered_rx ||
-	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+	    (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
 	    BNXT_TRUFLOW_EN(bp))
 		goto use_scalar_tx;
 
@@ -1594,10 +1594,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
 	bnxt_link_update_op(eth_dev, 1);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		vlan_mask |= ETH_VLAN_FILTER_MASK;
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		vlan_mask |= ETH_VLAN_STRIP_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
 	if (rc)
 		goto error;
@@ -1819,8 +1819,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		/* Retrieve link info from hardware */
 		rc = bnxt_get_hwrm_link_config(bp, &new);
 		if (rc) {
-			new.link_speed = ETH_LINK_SPEED_100M;
-			new.link_duplex = ETH_LINK_FULL_DUPLEX;
+			new.link_speed = RTE_ETH_LINK_SPEED_100M;
+			new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR,
 				"Failed to retrieve link rc = 0x%x!\n", rc);
 			goto out;
@@ -2014,7 +2014,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	if (!vnic->rss_table)
 		return -EINVAL;
 
-	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return -EINVAL;
 
 	if (reta_size != tbl_size) {
@@ -2120,7 +2120,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	 * If RSS enablement were different than dev_configure,
 	 * then return -EINVAL
 	 */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
@@ -2138,7 +2138,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
 	vnic->hash_mode =
 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
-					    ETH_RSS_LEVEL(rss_conf->rss_hf));
+					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
 
 	/*
 	 * If hashkey is not specified, use the previously configured
@@ -2183,30 +2183,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 		hash_types = vnic->hash_type;
 		rss_conf->rss_hf = 0;
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_IPV4;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_IPV6;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 		}
@@ -2246,17 +2246,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 		fc_conf->autoneg = 1;
 	switch (bp->link_info->pause) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	}
 	return 0;
@@ -2279,11 +2279,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		bp->link_info->auto_pause = 0;
 		bp->link_info->force_pause = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@@ -2294,7 +2294,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
 		}
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@@ -2305,7 +2305,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
 		}
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@@ -2336,7 +2336,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2351,7 +2351,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
 		bp->vxlan_port_cnt++;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2389,7 +2389,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (!bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2406,7 +2406,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
 		port = bp->vxlan_fw_dst_port_id;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (!bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2584,7 +2584,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 	int rc;
 
 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
-	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		/* Remove any VLAN filters programmed */
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
@@ -2604,7 +2604,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 		bnxt_add_vlan_filter(bp, 0);
 	}
 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
 
 	return 0;
 }
@@ -2617,7 +2617,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 
 	/* Destroy vnic filters and vnic */
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
 	}
@@ -2656,7 +2656,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		rc = bnxt_add_vlan_filter(bp, 0);
 		if (rc)
 			return rc;
@@ -2674,7 +2674,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
 
 	return rc;
 }
@@ -2694,22 +2694,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 	if (!dev->data->dev_started)
 		return 0;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering */
 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
 		else
 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@@ -2724,10 +2724,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 {
 	struct bnxt *bp = dev->data->dev_private;
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
-	if (vlan_type != ETH_VLAN_TYPE_INNER &&
-	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	    vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -2739,7 +2739,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		return -EINVAL;
 	}
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		switch (tpid) {
 		case RTE_ETHER_TYPE_QINQ:
 			bp->outer_tpid_bd =
@@ -2767,7 +2767,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		}
 		bp->outer_tpid_bd |= tpid;
 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer vlan in QinQ\n");
 		return -EINVAL;
@@ -2807,7 +2807,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
 	bnxt_del_dflt_mac_filter(bp, vnic);
 
 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		/* This filter will allow only untagged packets */
 		rc = bnxt_add_vlan_filter(bp, 0);
 	} else {
@@ -3029,10 +3029,10 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 	if (new_mtu > RTE_ETHER_MTU) {
 		bp->flags |= BNXT_FLAG_JUMBO;
 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	} else {
 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags &= ~BNXT_FLAG_JUMBO;
 	}
 
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 59489b591a6f..98e1107f629c 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -974,7 +974,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -1157,7 +1157,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 
 		rxq = bp->rx_queues[act_q->index];
 
-		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+		if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
 			goto use_vnic;
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index f29d57423585..0d9dda0c362c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	uint16_t j = dst_id - 1;
 
 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
-	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+	if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
 	    conf->pool_map[j].pools & (1UL << j)) {
 		PMD_DRV_LOG(DEBUG,
 			"Add vlan %u to vmdq pool %u\n",
@@ -2955,12 +2955,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 {
 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
-	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+	if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
 	switch (conf_link_speed) {
-	case ETH_LINK_SPEED_10M_HD:
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
 	}
@@ -2977,51 +2977,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 {
 	uint16_t eth_link_speed = 0;
 
-	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
-		return ETH_LINK_SPEED_AUTONEG;
+	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+		return RTE_ETH_LINK_SPEED_AUTONEG;
 
-	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_100M:
-	case ETH_LINK_SPEED_100M_HD:
+	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
 		break;
-	case ETH_LINK_SPEED_2_5G:
+	case RTE_ETH_LINK_SPEED_2_5G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
 		break;
-	case ETH_LINK_SPEED_20G:
+	case RTE_ETH_LINK_SPEED_20G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 		break;
@@ -3034,11 +3034,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 	return eth_link_speed;
 }
 
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
-		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
-		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
-		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+		RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+		RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+		RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+		RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
 
 static int bnxt_validate_link_speed(struct bnxt *bp)
 {
@@ -3047,13 +3047,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
 	uint32_t link_speed_capa;
 	uint32_t one_speed;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG)
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
 		return 0;
 
 	link_speed_capa = bnxt_get_speed_capabilities(bp);
 
-	if (link_speed & ETH_LINK_SPEED_FIXED) {
-		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+	if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+		one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
 
 		if (one_speed & (one_speed - 1)) {
 			PMD_DRV_LOG(ERR,
@@ -3083,71 +3083,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 {
 	uint16_t ret = 0;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
 		if (bp->link_info->support_speeds)
 			return bp->link_info->support_speeds;
 		link_speed = BNXT_SUPPORTED_SPEEDS;
 	}
 
-	if (link_speed & ETH_LINK_SPEED_100M)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_100M_HD)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_1G)
+	if (link_speed & RTE_ETH_LINK_SPEED_1G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
-	if (link_speed & ETH_LINK_SPEED_2_5G)
+	if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
-	if (link_speed & ETH_LINK_SPEED_10G)
+	if (link_speed & RTE_ETH_LINK_SPEED_10G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
-	if (link_speed & ETH_LINK_SPEED_20G)
+	if (link_speed & RTE_ETH_LINK_SPEED_20G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
-	if (link_speed & ETH_LINK_SPEED_25G)
+	if (link_speed & RTE_ETH_LINK_SPEED_25G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
-	if (link_speed & ETH_LINK_SPEED_40G)
+	if (link_speed & RTE_ETH_LINK_SPEED_40G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
-	if (link_speed & ETH_LINK_SPEED_50G)
+	if (link_speed & RTE_ETH_LINK_SPEED_50G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
-	if (link_speed & ETH_LINK_SPEED_100G)
+	if (link_speed & RTE_ETH_LINK_SPEED_100G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
-	if (link_speed & ETH_LINK_SPEED_200G)
+	if (link_speed & RTE_ETH_LINK_SPEED_200G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 	return ret;
 }
 
 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 {
-	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+	uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	switch (hw_link_speed) {
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
-		eth_link_speed = ETH_SPEED_NUM_100M;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
-		eth_link_speed = ETH_SPEED_NUM_1G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
-		eth_link_speed = ETH_SPEED_NUM_2_5G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
-		eth_link_speed = ETH_SPEED_NUM_10G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
-		eth_link_speed = ETH_SPEED_NUM_20G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
-		eth_link_speed = ETH_SPEED_NUM_25G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
-		eth_link_speed = ETH_SPEED_NUM_40G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
-		eth_link_speed = ETH_SPEED_NUM_50G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
-		eth_link_speed = ETH_SPEED_NUM_100G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
-		eth_link_speed = ETH_SPEED_NUM_200G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
 	default:
@@ -3160,16 +3160,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 
 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 {
-	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+	uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (hw_link_duplex) {
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
 		/* FALLTHROUGH */
-		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
-		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@@ -3198,12 +3198,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 		link->link_speed =
 			bnxt_parse_hw_link_speed(link_info->link_speed);
 	else
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
 	link->link_status = link_info->link_up;
 	link->link_autoneg = link_info->auto_mode ==
 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
-		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+		RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
 exit:
 	return rc;
 }
@@ -3229,7 +3229,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	if (BNXT_CHIP_P5(bp) &&
-	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+	    dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
 		/* 40G is not supported as part of media auto detect.
 		 * The speed should be forced and autoneg disabled
 		 * to configure 40G speed.
@@ -3320,7 +3320,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
 	HWRM_CHECK_RESULT();
 
-	bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+	bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
 
 	svif_info = rte_le_to_cpu_16(resp->svif_info);
 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index bdbad53b7d7f..a9f5e13476b0 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -536,7 +536,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 957b175f1b89..632a611bf612 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -185,7 +185,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
 	int tpa_info_len = 0;
 
-	if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		int tpa_max = BNXT_TPA_MAX_AGGS(bp);
 
 		tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@@ -278,7 +278,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 				    ag_bitmap_start, ag_bitmap_len);
 
 		/* TPA info */
-		if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 			rx_ring_info->tpa_info =
 				((struct bnxt_tpa_info *)((char *)mz->addr +
 							  tpa_info_start));
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index bbcb3b06e7df..0ac3a2b3b7d3 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -41,13 +41,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 	bp->nr_vnics = 0;
 
 	/* Multi-queue mode */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
 
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* FALLTHROUGH */
 			/* ETH_8/64_POOLs */
 			pools = conf->nb_queue_pools;
@@ -55,14 +55,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 			max_pools = RTE_MIN(bp->max_vnics,
 					    RTE_MIN(bp->max_l2_ctx,
 					    RTE_MIN(bp->max_rsscos_ctx,
-						    ETH_64_POOLS)));
+						    RTE_ETH_64_POOLS)));
 			PMD_DRV_LOG(DEBUG,
 				    "pools = %u max_pools = %u\n",
 				    pools, max_pools);
 			if (pools > max_pools)
 				pools = max_pools;
 			break;
-		case ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_RSS:
 			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
 			break;
 		default:
@@ -100,7 +100,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 				    ring_idx, rxq, i, vnic);
 		}
 		if (i == 0) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
 				bp->eth_dev->data->promiscuous = 1;
 				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 			}
@@ -110,8 +110,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 		vnic->end_grp_id = end_grp_id;
 
 		if (i) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
-			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
 				vnic->rss_dflt_cr = true;
 			goto skip_filter_allocation;
 		}
@@ -136,14 +136,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
 	bp->rx_num_qs_per_vnic = nb_q_per_grp;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
 
 		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
 			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
 
 		for (i = 0; i < bp->nr_vnics; i++) {
-			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
 
 			vnic = &bp->vnic_info[i];
 			vnic->hash_type =
@@ -338,7 +338,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
 	rxq->port_id = eth_dev->data->port_id;
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -454,7 +454,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
 
 		if (BNXT_HAS_RING_GRPS(bp)) {
@@ -525,7 +525,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq->rx_started = false;
 	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (BNXT_HAS_RING_GRPS(bp))
 			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 73fbdd17d126..0909bab89b76 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
 	offloads = dev_conf->rxmode.offloads;
 
-	outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
 
 	/* Initialize ol_flags table. */
 	pt = rxr->ol_flags_table;
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
index d08854ff61e2..e4905b4fd169 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
@@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 9b9489a695a2..0627fd212d0a 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static inline void
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 13211060cf0e..f15e2d3b4ed4 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index 6e563053260a..ffd560166cac 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 47824334ae3e..401dd83f4e7d 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -350,7 +350,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@@ -476,7 +476,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 26253a7e17f2..c63cf4b943fa 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 {
 	uint16_t hwrm_type = 0;
 
-	if (rte_type & ETH_RSS_IPV4)
+	if (rte_type & RTE_ETH_RSS_IPV4)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-	if (rte_type & ETH_RSS_IPV6)
+	if (rte_type & RTE_ETH_RSS_IPV6)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 
 	return hwrm_type;
@@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
 {
 	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
-	bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
-	bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP));
+	bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+	bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP));
 	bool l3_only = l3 && !l4;
 	bool l3_and_l4 = l3 && l4;
 
@@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
 	 * return default hash mode.
 	 */
 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
-		return ETH_RSS_LEVEL_PMD_DEFAULT;
+		return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
 	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
 		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_INNERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
 	else
-		rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+		rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	return rss_level;
 }
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index f71543810970..77ecbef04c3d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 	if (vf >= bp->pdev->max_vfs)
 		return -EINVAL;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
 		PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
 		return -ENOTSUP;
 	}
 
 	/* Is this really the correct mapping?  VFd seems to think it is. */
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		flag |= BNXT_VNIC_INFO_PROMISC;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		flag |= BNXT_VNIC_INFO_BCAST;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
 
 	if (on)
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index fc179a2732ac..fcf878b9b858 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -167,7 +167,7 @@ struct bond_dev_private {
 	struct rte_eth_desc_lim tx_desc_lim;	/**< Tx descriptor limits */
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
 			RTE_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[52];				/**< 52-byte hash key buffer. */
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 128754f4595a..20adfcf0ea9c 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
 	uint16_t key_speed;
 
 	switch (speed) {
-	case ETH_SPEED_NUM_NONE:
+	case RTE_ETH_SPEED_NUM_NONE:
 		key_speed = 0x00;
 		break;
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		key_speed = BOND_LINK_SPEED_KEY_10M;
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		key_speed = BOND_LINK_SPEED_KEY_100M;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		key_speed = BOND_LINK_SPEED_KEY_1000M;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		key_speed = BOND_LINK_SPEED_KEY_10G;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		key_speed = BOND_LINK_SPEED_KEY_20G;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		key_speed = BOND_LINK_SPEED_KEY_40G;
 		break;
 	default:
@@ -866,7 +866,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
 		if (ret >= 0 && link_info.link_status != 0) {
 			key = link_speed_key(link_info.link_speed) << 1;
-			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+			if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
 				key |= BOND_LINK_FULL_DUPLEX_KEY;
 		} else {
 			key = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index eb8d15d16034..f12060bcafb0 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
 
 	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
 	if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
 		return 0;
 
 	internals = bonded_eth_dev->data->dev_private;
@@ -586,7 +586,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 			return -1;
 		}
 
-		 if (link_props.link_status == ETH_LINK_UP) {
+		 if (link_props.link_status == RTE_ETH_LINK_UP) {
 			if (internals->active_slave_count == 0 &&
 			    !internals->user_defined_primary_port)
 				bond_ethdev_primary_set(internals,
@@ -721,7 +721,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
 		internals->tx_offload_capa = 0;
 		internals->rx_queue_offload_capa = 0;
 		internals->tx_queue_offload_capa = 0;
-		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 		internals->reta_size = 0;
 		internals->candidate_max_rx_pktlen = 0;
 		internals->max_rx_pktlen = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index a6755661c49c..2482bb1cbc02 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1373,8 +1373,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
 		 * In any other mode the link properties are set to default
 		 * values of AUTONEG/DUPLEX
 		 */
-		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
-		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	}
 }
 
@@ -1704,7 +1704,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	/* If RSS is enabled for bonding, try to enable it for slaves  */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (internals->rss_key_len != 0) {
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
 					internals->rss_key_len;
@@ -1721,23 +1721,23 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	slave_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
 			bonded_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_JUMBO_FRAME)
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_JUMBO_FRAME;
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
 	nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
@@ -1838,7 +1838,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* If RSS is enabled for bonding, synchronize RETA */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int i;
 		struct bond_dev_private *internals;
 
@@ -1961,7 +1961,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 1;
 
 	internals = eth_dev->data->dev_private;
@@ -2101,7 +2101,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 
 	internals->link_status_polling_enabled = 0;
@@ -2423,15 +2423,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 
 	bond_ctx = ethdev->data->dev_private;
 
-	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	if (ethdev->data->dev_started == 0 ||
 			bond_ctx->active_slave_count == 0) {
-		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 
-	ethdev->data->dev_link.link_status = ETH_LINK_UP;
+	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	if (wait_to_complete)
 		link_update = rte_eth_link_get;
@@ -2456,7 +2456,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 					  &slave_link);
 			if (ret < 0) {
 				ethdev->data->dev_link.link_speed =
-					ETH_SPEED_NUM_NONE;
+					RTE_ETH_SPEED_NUM_NONE;
 				RTE_BOND_LOG(ERR,
 					"Slave (port %u) link get failed: %s",
 					bond_ctx->active_slaves[idx],
@@ -2498,7 +2498,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 		 * In these modes the maximum theoretical link speed is the sum
 		 * of all the slaves
 		 */
-		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		one_link_update_succeeded = false;
 
 		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2872,7 +2872,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 			goto link_update;
 
 		/* check link state properties if bonded link is up */
-		if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+		if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 			if (link_properties_valid(bonded_eth_dev, &link) != 0)
 				RTE_BOND_LOG(ERR, "Invalid link properties "
 					     "for slave %d in bonding mode %d",
@@ -2888,7 +2888,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
 			bonded_eth_dev->data->dev_link.link_status =
-								ETH_LINK_UP;
+								RTE_ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 			lsc_flag = 1;
 
@@ -3279,7 +3279,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	internals->max_rx_pktlen = 0;
 
 	/* Initially allow to choose any offload type */
-	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 
 	memset(&internals->default_rxconf, 0,
 	       sizeof(internals->default_rxconf));
@@ -3508,7 +3508,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	 * set key to the value specified in port RSS configuration.
 	 * Fall back to default RSS key if the key is not specified
 	 */
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
 			internals->rss_key_len =
 				dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 7caec6cf14c8..9a09748673b2 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -15,22 +15,22 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
@@ -69,36 +69,36 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index 69e767ac3dd6..e3b1bd8ad225 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -76,12 +76,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
index 0e1276c60ba2..f63b8fabefd4 100644
--- a/drivers/net/cnxk/cn10k_tx.c
+++ b/drivers/net/cnxk/cn10k_tx.c
@@ -77,11 +77,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 115e678916bb..9ff2d3dc114a 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,22 +15,22 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
@@ -69,36 +69,36 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -277,9 +277,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
@@ -530,17 +530,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index 7d9f1bd61f79..08ee28658bce 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -76,12 +76,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index 763f9a14fd79..f35ae8e70438 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -76,11 +76,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 0e3652ed5109..f6b75645bb69 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 
 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	uint32_t speed_capa;
 
 	/* Auto negotiation disabled */
-	speed_capa = ETH_LINK_SPEED_FIXED;
+	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
-		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return speed_capa;
@@ -54,8 +54,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 }
 
@@ -90,7 +90,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct rte_eth_fc_conf fc_conf = {0};
 	int rc;
 
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
 	 * by the AF driver; update that info in the PMD structure.
 	 */
 	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -98,10 +98,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
@@ -122,11 +122,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (roc_model_is_cn96_ax() &&
 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
-	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_cfg.mode =
-				(fc_cfg.mode == RTE_FC_FULL ||
-				fc_cfg.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_cfg.mode == RTE_ETH_FC_FULL ||
+				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -169,7 +169,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
 	 * A maximum of three segments can be supported with W8; choose
 	 * NIX_MAXSQESZ_W16 for multi-segment offload.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -361,7 +361,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
 		rc = cnxk_nix_tsc_convert(dev);
 		if (rc) {
 			plt_err("Failed to calculate delta and freq mult");
@@ -434,24 +434,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->ethdev_rss_hf = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -460,34 +460,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -513,7 +513,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
 	uint64_t rss_hf;
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 
@@ -729,8 +729,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
 
 	/* Nothing much to do if offload is not enabled */
 	if (!(dev->tx_offloads &
-	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
 		return 0;
 
 	/* Setup LSO formats in AF. It's a no-op if other ethdev has
@@ -778,13 +778,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
@@ -814,7 +814,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Prepare rx cfg */
 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
 	}
@@ -1191,12 +1191,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled on PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
 	else
 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		rc = rte_mbuf_dyn_rx_timestamp_register
 			(&dev->tstamp.tstamp_dynfield_offset,
 			 &dev->tstamp.rx_tstamp_dynflag);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 2528b3cdaa0c..53a657f8865d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -54,41 +54,44 @@
 	 CNXK_NIX_TX_NB_SEG_MAX)
 
 #define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
-	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
-	 ETH_RSS_L4_DST_ONLY)
+	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |                   \
+	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 #define CNXK_NIX_RSS_OFFLOAD                                                   \
-	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
-	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
-	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |                 \
+	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL |             \
+	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST |                 \
+	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
 
 #define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
-	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
-	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
-	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
-	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
-	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
-	 DEV_TX_OFFLOAD_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |          \
+	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT |             \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |                 \
+	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |                  \
+	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |        \
+	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS |              \
+	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
-	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
-	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
-	 DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
-	 DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP |                  \
-	 DEV_RX_OFFLOAD_VLAN_STRIP)
+	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |                 \
+	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER |            \
+	 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH | RTE_ETH_RX_OFFLOAD_TIMESTAMP |                  \
+	 RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 
 #define RSS_IPV4_ENABLE                                                        \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE                                                        \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
-	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE                                                     \
-	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS 3
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 37720fb0954e..bf0c6d6b4ad8 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -49,11 +49,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
 		val = ROC_NIX_RSS_RETA_SZ_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
 		val = ROC_NIX_RSS_RETA_SZ_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
 		val = ROC_NIX_RSS_RETA_SZ_256;
 	else
 		val = ROC_NIX_RSS_RETA_SZ_64;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index b6cc5286c6d0..fa6b8aa4f0c5 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -81,25 +81,25 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-		{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
-		{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
-		{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
-		{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
-		{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
-		{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
-		{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
-		{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
-		{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-		{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-		{DEV_RX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
-		{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
-		{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+		{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+		{RTE_ETH_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
+		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
 						 "Scalar, Rx Offloads:"
@@ -143,28 +143,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-		{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-		{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
-		{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
-		{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
-		{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
-		{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
-		{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
-		{DEV_TX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
 						 "Scalar, Tx Offloads:"
@@ -204,8 +204,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	enum rte_eth_fc_mode mode_map[] = {
-					   RTE_FC_NONE, RTE_FC_RX_PAUSE,
-					   RTE_FC_TX_PAUSE, RTE_FC_FULL
+					   RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+					   RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
 					  };
 	struct roc_nix *nix = &dev->nix;
 	int mode;
@@ -265,10 +265,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -409,13 +409,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		plt_err("Scatter offload is not enabled for mtu");
 		goto exit;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
 		plt_err("Greater than maximum supported packet length");
 		goto exit;
@@ -443,9 +443,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	frame_size += RTE_ETHER_CRC_LEN;
 
 	if (frame_size > RTE_ETHER_MAX_LEN)
-		dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Update max_rx_pkt_len */
 	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -816,7 +816,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 	if (rss_conf->rss_key)
 		roc_nix_rss_key_set(nix, rss_conf->rss_key);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index 3fdbdba49549..1cff8d56e65b 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		plt_info("Port %d: Link Up - speed %u Mbps - %s",
 			 (int)(eth_dev->data->port_id),
 			 (uint32_t)link->link_speed,
-			 link->link_duplex == ETH_LINK_FULL_DUPLEX
+			 link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 				 ? "full-duplex"
 				 : "half-duplex");
 	else
@@ -66,7 +66,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
 
 	eth_link.link_status = link->status;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	/* Print link info */
@@ -94,17 +94,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		return 0;
 
 	if (roc_nix_is_lbk(&dev->nix)) {
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_100G;
-		link.link_autoneg = ETH_LINK_FIXED;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		rc = roc_nix_mac_link_info_get(&dev->nix, &info);
 		if (rc)
 			return rc;
 		link.link_status = info.status;
 		link.link_speed = info.speed;
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 		if (info.full_duplex)
 			link.link_duplex = info.full_duplex;
 	}
diff --git a/drivers/net/cnxk/cnxk_ptp.c b/drivers/net/cnxk/cnxk_ptp.c
index 449489f599c4..139fea256ccd 100644
--- a/drivers/net/cnxk/cnxk_ptp.c
+++ b/drivers/net/cnxk/cnxk_ptp.c
@@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 	dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	rc = roc_nix_ptp_rx_ena_dis(nix, true);
 	if (!rc) {
@@ -257,7 +257,7 @@ int
 cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+	uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	struct roc_nix *nix = &dev->nix;
 	int rc = 0;
 
diff --git a/drivers/net/cnxk/cnxk_rte_flow.c b/drivers/net/cnxk/cnxk_rte_flow.c
index 32c1b5dee5fa..ecdfee7b11a6 100644
--- a/drivers/net/cnxk/cnxk_rte_flow.c
+++ b/drivers/net/cnxk/cnxk_rte_flow.c
@@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 7c89a028bf16..dee618a0db5f 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -28,32 +28,32 @@
 #define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
 
 #define CXGBE_DEFAULT_RSS_KEY_LEN     40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-				ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
-				ETH_RSS_NONFRAG_IPV6_OTHER | \
-				ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
-				    ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
-				    ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+				RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+				    RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+				    RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
 
 /* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
-			   DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_TX_OFFLOAD_UDP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_TSO | \
-			   DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			   DEV_RX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_RX_OFFLOAD_UDP_CKSUM | \
-			   DEV_RX_OFFLOAD_TCP_CKSUM | \
-			   DEV_RX_OFFLOAD_JUMBO_FRAME | \
-			   DEV_RX_OFFLOAD_SCATTER | \
-			   DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+			   RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+			   RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+			   RTE_ETH_RX_OFFLOAD_SCATTER | \
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 /* Devargs filtermode and filtermask representation */
 enum cxgbe_devargs_filter_mode_flags {
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 177eca397600..4b5ab6f62971 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 	}
 
 	new_link.link_status = cxgbe_force_linkup(adapter) ?
-			       ETH_LINK_UP : pi->link_cfg.link_ok;
+			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
 	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
@@ -316,10 +316,10 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	/* set to jumbo mode if needed */
 	if (new_mtu > CXGBE_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
 			    -1, -1, true);
@@ -396,7 +396,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
 			goto out;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 	else
 		eth_dev->data->scattered_rx = 0;
@@ -460,9 +460,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
@@ -685,10 +685,10 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	/* Set to jumbo mode if necessary */
 	if (pkt_len > CXGBE_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
 			       &rxq->fl, NULL,
@@ -1079,13 +1079,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		rx_pause = 1;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1098,12 +1098,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	u8 tx_pause = 0, rx_pause = 0;
 	int ret;
 
-	if (fc_conf->mode == RTE_FC_FULL) {
+	if (fc_conf->mode == RTE_ETH_FC_FULL) {
 		tx_pause = 1;
 		rx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
 		tx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
 		rx_pause = 1;
 	}
 
@@ -1199,9 +1199,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	}
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@@ -1478,7 +1478,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_100G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
 		}
@@ -1487,7 +1487,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_50G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
 		}
@@ -1496,7 +1496,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_25G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 6dd1bf1f836e..54723edc2144 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1671,7 +1671,7 @@ int cxgbe_link_start(struct port_info *pi)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
-			    !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+			    !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
 			    true);
 	if (ret == 0) {
 		ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@@ -1695,7 +1695,7 @@ int cxgbe_link_start(struct port_info *pi)
 	}
 
 	if (ret == 0 && cxgbe_force_linkup(adapter))
-		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return ret;
 }
 
@@ -1726,10 +1726,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
 	if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
 			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
@@ -1866,7 +1866,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
 {
 #define SET_SPEED(__speed_name) \
 	do { \
-		*speed_caps |= ETH_LINK_ ## __speed_name; \
+		*speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
 	} while (0)
 
 #define FW_CAPS_TO_SPEED(__fw_name) \
@@ -1953,7 +1953,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
 			      speed_caps);
 
 	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
-		*speed_caps |= ETH_LINK_SPEED_FIXED;
+		*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
 }
 
 /**
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index e5f7721dc4b3..eddb818c4861 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -366,7 +366,7 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
 	int ret, i;
 	struct rte_pktmbuf_pool_private *mbp_priv;
 	u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_JUMBO_FRAME;
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
 	mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 27d670f843d2..c466256137a3 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -54,30 +54,30 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -189,10 +189,10 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > DPAA_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 		tx_offloads, dev_tx_offloads_nodis);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		uint32_t max_len;
 
 		DPAA_PMD_DEBUG("enabling jumbo");
@@ -259,7 +259,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		DPAA_PMD_DEBUG("enabling scatter mode");
 		fman_if_set_sg(dev->process_private, 1);
 		dev->data->scattered_rx = 1;
@@ -304,43 +304,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	/* Configure link only if link is UP*/
 	if (link->link_status) {
-		if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 			/* Start autoneg only if link is not in autoneg mode */
 			if (!link->link_autoneg)
 				dpaa_restart_link_autoneg(__fif->node_name);
-		} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-			switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
-			case ETH_LINK_SPEED_10M_HD:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+			case RTE_ETH_LINK_SPEED_10M_HD:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10M:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10M:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M_HD:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M_HD:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_1G:
-				speed = ETH_SPEED_NUM_1G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_1G:
+				speed = RTE_ETH_SPEED_NUM_1G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_2_5G:
-				speed = ETH_SPEED_NUM_2_5G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_2_5G:
+				speed = RTE_ETH_SPEED_NUM_2_5G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10G:
-				speed = ETH_SPEED_NUM_10G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10G:
+				speed = RTE_ETH_SPEED_NUM_10G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			default:
-				speed = ETH_SPEED_NUM_NONE;
-				duplex = ETH_LINK_FULL_DUPLEX;
+				speed = RTE_ETH_SPEED_NUM_NONE;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			}
 			/* Set link speed */
@@ -556,30 +556,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G
-					| ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G
+					| RTE_ETH_LINK_SPEED_10G;
 	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, fif->mac_type);
@@ -612,13 +612,13 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+			{RTE_ETH_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 
 	/* Update Rx offload info */
@@ -645,14 +645,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -686,7 +686,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 			ret = dpaa_get_link_status(__fif->node_name, link);
 			if (ret)
 				return ret;
-			if (link->link_status == ETH_LINK_DOWN &&
+			if (link->link_status == RTE_ETH_LINK_DOWN &&
 			    wait_to_complete)
 				rte_delay_ms(CHECK_INTERVAL);
 			else
@@ -697,15 +697,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	}
 
 	if (ioctl_version < 2) {
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
-		link->link_autoneg = ETH_LINK_AUTONEG;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 		if (fif->mac_type == fman_mac_1g)
-			link->link_speed = ETH_SPEED_NUM_1G;
+			link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		else if (fif->mac_type == fman_mac_2_5g)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else if (fif->mac_type == fman_mac_10g)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
 			DPAA_PMD_ERR("invalid link_speed: %s, %d",
 				     dpaa_intf->name, fif->mac_type);
@@ -981,7 +981,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
 		;
 	} else if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SCATTER) {
+			RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
 			buffsz * DPAA_SGT_MAX_ENTRIES) {
 			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
@@ -1303,7 +1303,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
 	else
 		return dpaa_eth_dev_stop(dev);
 	return 0;
@@ -1319,7 +1319,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
 	else
 		dpaa_eth_dev_start(dev);
 	return 0;
@@ -1349,10 +1349,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == RTE_FC_NONE) {
+	if (fc_conf->mode == RTE_ETH_FC_NONE) {
 		return 0;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-		 fc_conf->mode == RTE_FC_FULL) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+		 fc_conf->mode == RTE_ETH_FC_FULL) {
 		fman_if_set_fc_threshold(dev->process_private,
 					 fc_conf->high_water,
 					 fc_conf->low_water,
@@ -1396,11 +1396,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 	}
 	ret = fman_if_get_fc_threshold(dev->process_private);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time =
 			fman_if_get_fc_quanta(dev->process_private);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1663,10 +1663,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
 	fc_conf = dpaa_intf->fc_conf;
 	ret = fman_if_get_fc_threshold(fman_intf);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
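
Side note: in the fixed-speed switch above, RTE_ETH_LINK_SPEED_FIXED
(bit 0) must be masked *out* before comparing against the individual
speed flags, hence the '~' kept by the conversion. On the application
side the two bits travel together; a minimal sketch, assuming an
already probed hypothetical port_id:

	#include <rte_ethdev.h>

	struct rte_eth_conf conf = { 0 };
	int ret;

	/* FIXED disables autonegotiation; exactly one speed flag
	 * then selects the rate.
	 */
	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			   RTE_ETH_LINK_SPEED_1G;
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
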
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b5728e09c29f..c868e9d5bd9b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -74,11 +74,11 @@
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
 #define DPAA_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP)
 
 #define DPAA_TX_CKSUM_OFFLOAD_MASK (             \
 		PKT_TX_IP_CKSUM |                \
diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
index c5b5ec869519..1ccd03602790 100644
--- a/drivers/net/dpaa/dpaa_flow.c
+++ b/drivers/net/dpaa/dpaa_flow.c
@@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1U << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_ETH;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
 
 				if (ipv4_configured)
 					break;
@@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV4;
 				break;
 
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (ipv6_configured)
 					break;
@@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV6;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
 
 				if (tcp_configured)
 					break;
@@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_TCP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (udp_configured)
 					break;
@@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_UDP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
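
Side note: set_dist_units() above walks req_dist_set one bit at a
time: '% 2' tests the current low bit and the shift count rebuilds the
matching RTE_ETH_RSS_* flag for the switch. The same idiom as a
standalone sketch:

	#include <stdint.h>
	#include <rte_ethdev.h>

	uint64_t req_dist_set = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP;
	unsigned int loop;

	for (loop = 0; req_dist_set != 0; req_dist_set >>= 1, loop++) {
		if (req_dist_set % 2 != 0) {
			uint64_t dist_field = 1ULL << loop;

			/* dist_field holds exactly one RTE_ETH_RSS_*
			 * bit here, ready for the switch () above.
			 */
			(void)dist_field;
		}
	}
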
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 641e7027f12e..7c92b2a42e3f 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -216,7 +216,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1ULL << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -233,7 +233,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_MPLS:
+			case RTE_ETH_RSS_MPLS:
 
 				if (mpls_configured)
 					break;
@@ -270,13 +270,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (l3_configured)
 					break;
@@ -314,12 +314,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_TCP_EX:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (l4_configured)
 					break;
@@ -346,8 +346,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c12169578e22..23bb985b95e9 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -38,34 +38,34 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_CHECKSUM |
-		DEV_RX_OFFLOAD_SCTP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_TIMESTAMP;
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_RSS_HASH |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -143,7 +143,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN Filter not avaialble */
 		if (!priv->max_vlan_filters) {
 			DPAA2_PMD_INFO("VLAN filter not available");
@@ -151,7 +151,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 
 		if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
 		else
@@ -252,13 +252,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 					dev_rx_offloads_nodis;
 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
 					dev_tx_offloads_nodis;
-	dev_info->speed_capa = ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -271,10 +271,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
 	if (dpaa2_svr_family == SVR_LX2160A) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return 0;
@@ -292,16 +292,16 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+			{RTE_ETH_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
 	};
 
 	/* Update Rx offload info */
@@ -328,15 +328,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -559,7 +559,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		tx_offloads, dev_tx_offloads_nodis);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
 				priv->token, eth_conf->rxmode.max_rx_pkt_len
@@ -578,7 +578,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
 			ret = dpaa2_setup_flow_dist(dev,
 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -592,12 +592,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rx_l3_csum_offload = true;
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
 		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -615,7 +615,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
 	{
 		ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -628,12 +628,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		dpaa2_enable_ts[dev->data->port_id] = true;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		tx_l3_csum_offload = true;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
 		tx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -665,8 +665,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
 	dpaa2_tm_init(dev);
 
@@ -1477,10 +1477,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > DPAA2_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -1881,7 +1881,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
 			return -1;
 		}
-		if (state.up == ETH_LINK_DOWN &&
+		if (state.up == RTE_ETH_LINK_DOWN &&
 		    wait_to_complete)
 			rte_delay_ms(CHECK_INTERVAL);
 		else
@@ -1893,9 +1893,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 	link.link_speed = state.rate;
 
 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
@@ -2056,9 +2056,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	No TX side flow control (send Pause frame disabled)
 		 */
 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_RX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	} else {
 		/* DPNI_LINK_OPT_PAUSE not set
 		 *  if ASYM_PAUSE set,
@@ -2068,9 +2068,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	Flow control disabled
 		 */
 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return ret;
@@ -2114,14 +2114,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	/* update cfg with fc_conf */
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		/* Full flow control;
 		 * OPT_PAUSE set, ASYM_PAUSE not set
 		 */
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		/* Enable RX flow control
 		 * OPT_PAUSE not set;
 		 * ASYM_PAUSE set;
@@ -2129,7 +2129,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		/* Enable TX Flow control
 		 * OPT_PAUSE set
 		 * ASYM_PAUSE set
@@ -2137,7 +2137,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		/* Disable Flow control
 		 * OPT_PAUSE not set
 		 * ASYM_PAUSE not set
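
Side note: the four RTE_ETH_FC_* modes map onto the two DPNI options
as spelled out in the comments above (PAUSE gives symmetric pause,
ASYM_PAUSE flips the direction). An application requests the same
modes through the generic API; a sketch with the renamed enum and a
hypothetical port_id:

	#include <string.h>
	#include <rte_ethdev.h>

	struct rte_eth_fc_conf fc;
	int ret;

	memset(&fc, 0, sizeof(fc));
	fc.mode = RTE_ETH_FC_FULL;	/* PAUSE set, ASYM_PAUSE clear */
	fc.pause_time = 0x680;		/* example pause quanta */
	ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
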
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index b9c729f6cdc0..ca75a2175524 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -65,12 +65,12 @@
 #define DPAA2_TX_CONF_ENABLE	0x08
 
 #define DPAA2_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP | \
-	ETH_RSS_MPLS)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP | \
+	RTE_ETH_RSS_MPLS)
 
 /* LX2 FRC Parsed values (Little Endian) */
 #define DPAA2_PKT_TYPE_ETHER		0x0060
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index f40369e2c3f9..7c77243b5d1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -773,7 +773,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP)
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
@@ -987,7 +987,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 							eth_data->port_id);
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP) {
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			rte_vlan_strip(bufs[num_rx]);
 		}
 
@@ -1230,7 +1230,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					if (unlikely(((*bufs)->ol_flags
 						& PKT_TX_VLAN_PKT) ||
 						(eth_data->dev_conf.txmode.offloads
-						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -1273,7 +1273,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
 				(eth_data->dev_conf.txmode.offloads
-				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 				int ret = rte_vlan_insert(bufs);
 				if (ret)
 					goto send_n_return;
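
Side note: the Tx path above falls back to software tagging whenever
the mbuf carries PKT_TX_VLAN_PKT or the port enables
RTE_ETH_TX_OFFLOAD_VLAN_INSERT. The core of that pattern, as a
standalone sketch (m and txmode stand in for the driver's locals):

	#include <rte_ether.h>
	#include <rte_mbuf.h>

	if ((m->ol_flags & PKT_TX_VLAN_PKT) ||
	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)) {
		/* rte_vlan_insert() prepends a tag built from
		 * m->vlan_tci and may replace the head segment,
		 * hence the pointer-to-pointer.
		 */
		if (rte_vlan_insert(&m) != 0) {
			/* drop, as the driver does via send_n_return */
		}
	}
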
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 3b4d9c3ee6f4..ca488fea966f 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -81,15 +81,15 @@
 #define E1000_FTQF_QUEUE_ENABLE          0x00000100
 
 #define IGB_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 /*
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a0ca371b0275..81f8bc3cd746 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -599,8 +599,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	e1000_clear_hw_cntrs_base_generic(hw);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | \
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_em_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -613,39 +613,39 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -1104,9 +1104,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
@@ -1164,17 +1164,17 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -1426,15 +1426,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if(mask & RTE_ETH_VLAN_STRIP_MASK){
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if(mask & RTE_ETH_VLAN_FILTER_MASK){
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1603,7 +1603,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
-			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
@@ -1685,13 +1685,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1820,11 +1820,11 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (frame_size > E1000_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index dfd8f2fd0074..cf672c32277b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -93,7 +93,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -172,7 +172,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1168,11 +1168,11 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	RTE_SET_USED(dev);
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	return tx_offload_capa;
 }
@@ -1367,15 +1367,15 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 	max_rx_pktlen = em_get_max_pktlen(dev);
 
 	rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP  |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM   |
-		DEV_RX_OFFLOAD_KEEP_CRC    |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
-		rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	return rx_offload_capa;
 }
@@ -1468,7 +1468,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1806,7 +1806,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -1839,7 +1839,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME ||
 				rctl_bsize < RTE_ETHER_MAX_LEN) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1849,7 +1849,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1862,7 +1862,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1874,21 +1874,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	if ((hw->mac.type == e1000_ich9lan ||
 			hw->mac.type == e1000_pch2lan ||
 			hw->mac.type == e1000_ich10lan) &&
-			rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+			rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
 		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
 		E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
 	}
 
 	if (hw->mac.type == e1000_pch2lan) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
 		else
 			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1908,7 +1908,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		rctl |= E1000_RCTL_LPE;
 	else
 		rctl &= ~E1000_RCTL_LPE;
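
Side note: em_get_rx_port_offloads_capa() above feeds
dev_info->rx_offload_capa; applications typically probe that field
before requesting an offload. A minimal sketch with the renamed flags
and a hypothetical port_id:

	#include <rte_ethdev.h>

	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret == 0 &&
	    (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC))
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
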
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 10ee0f33415a..7a35d7d89eb1 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1082,21 +1082,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
-	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
-	    tx_mq_mode == ETH_MQ_TX_DCB ||
-	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* Check multi-queue mode.
-		 * To no break software we accept ETH_MQ_RX_NONE as this might
+		 * To not break software, we accept RTE_ETH_MQ_RX_NONE as this might
 		 * be used to turn off VLAN filter.
 		 */
 
-		if (rx_mq_mode == ETH_MQ_RX_NONE ||
-		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+		if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+		    rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 		} else {
 			/* Only support one queue on VFs.
@@ -1108,12 +1108,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 		/* TX mode is not used here, so mode might be ignored.*/
-		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(WARNING, "SRIOV is active,"
 					" TX mode %d is not supported. "
 					" Driver will behave as %d mode.",
-					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+					tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
 		}
 
 		/* check valid queue number */
@@ -1126,17 +1126,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 		/* To no break software that set invalid mode, only display
 		 * warning if invalid mode is used.
 		 */
-		if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
-		    rx_mq_mode != ETH_MQ_RX_RSS) {
+		if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 			/* RSS together with VMDq not supported*/
 			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				     rx_mq_mode);
 			return -EINVAL;
 		}
 
-		if (tx_mq_mode != ETH_MQ_TX_NONE &&
-		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+		    tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
 					" Due to txmode is meaningless in this"
 					" driver, just ignore.",
@@ -1155,8 +1155,8 @@ eth_igb_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multipe queue mode checking */
 	ret  = igb_check_mq_mode(dev);
@@ -1296,8 +1296,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/*
 	 * VLAN Offload Settings
 	 */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | \
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_igb_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
@@ -1305,7 +1305,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable VLAN filter since VMDq always use VLAN filter */
 		igb_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1319,39 +1319,39 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -2194,21 +2194,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_82576:
 		dev_info->max_rx_queues = 16;
 		dev_info->max_tx_queues = 16;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 16;
 		break;
 
 	case e1000_82580:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
 	case e1000_i350:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
@@ -2234,7 +2234,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		return -EINVAL;
 	}
 	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2260,9 +2260,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2305,12 +2305,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	switch (hw->mac.type) {
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
@@ -2411,17 +2411,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else if (!link_check) {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2597,7 +2597,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
 	/* only outer TPID of double VLAN can be configured*/
-	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg = E1000_READ_REG(hw, E1000_VET);
 		reg = (reg & (~E1000_VET_VET_EXT)) |
 			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
@@ -2686,7 +2686,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 				dev->data->dev_conf.rxmode.max_rx_pkt_len);
 }
@@ -2704,7 +2704,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						VLAN_TAG_SIZE);
@@ -2716,22 +2716,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if(mask & RTE_ETH_VLAN_STRIP_MASK){
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if(mask & RTE_ETH_VLAN_FILTER_MASK){
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if(mask & RTE_ETH_VLAN_EXTEND_MASK){
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -2883,7 +2883,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
 				     " Port %d: Link Up - speed %u Mbps - %s",
 				     dev->data->port_id,
 				     (unsigned)link.link_speed,
-				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				     "full-duplex" : "half-duplex");
 		} else {
 			PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3037,13 +3037,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3112,18 +3112,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 * on configuration
 		 */
 		switch (fc_conf->mode) {
-		case RTE_FC_NONE:
+		case RTE_ETH_FC_NONE:
 			ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_RX_PAUSE:
+		case RTE_ETH_FC_RX_PAUSE:
 			ctrl |= E1000_CTRL_RFCE;
 			ctrl &= ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_TX_PAUSE:
+		case RTE_ETH_FC_TX_PAUSE:
 			ctrl |= E1000_CTRL_TFCE;
 			ctrl &= ~E1000_CTRL_RFCE;
 			break;
-		case RTE_FC_FULL:
+		case RTE_ETH_FC_FULL:
 			ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
 			break;
 		default:
@@ -3271,22 +3271,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -3584,10 +3584,10 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number the hardware can support "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
@@ -3625,10 +3625,10 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number the hardware can support "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
@@ -4407,11 +4407,11 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (frame_size > E1000_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
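
Note (not part of the diff): the renamed flow control enum above is what
applications pass through rte_eth_dev_flow_ctrl_set(). A minimal sketch,
assuming <rte_ethdev.h> is included, port_id is a started port, and the
pause_time value is a placeholder:

	struct rte_eth_fc_conf fc_conf = {
		.mode = RTE_ETH_FC_FULL,	/* was RTE_FC_FULL */
		.pause_time = 0x680,		/* placeholder value */
		.autoneg = 1,
	};

	if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
		printf("failed to set flow control\n");
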
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 2ce74dd5a9a5..fe355ef6b3b5 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -88,7 +88,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
-	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+	RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
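
Note: RTE_ETH_8_POOLS written to RTE_ETH_DEV_SRIOV() above is the same
enum an application uses when requesting VMDq pools itself. Sketch with
assumed values, not taken from this patch:

	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY },
		.rx_adv_conf.vmdq_rx_conf = {
			.nb_queue_pools = RTE_ETH_8_POOLS,	/* was ETH_8_POOLS */
			.rx_mode = RTE_ETH_VMDQ_ACCEPT_UNTAG |
				   RTE_ETH_VMDQ_ACCEPT_BROADCAST,
		},
	};
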
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 278d5d2712af..78c85fdbb51c 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -111,7 +111,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /**
@@ -185,7 +185,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1456,13 +1456,13 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	RTE_SET_USED(dev);
-	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
-			  DEV_TX_OFFLOAD_TCP_TSO     |
-			  DEV_TX_OFFLOAD_MULTI_SEGS;
+	tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return tx_offload_capa;
 }
@@ -1635,20 +1635,20 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
-			  DEV_RX_OFFLOAD_VLAN_FILTER |
-			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_RX_OFFLOAD_UDP_CKSUM   |
-			  DEV_RX_OFFLOAD_TCP_CKSUM   |
-			  DEV_RX_OFFLOAD_JUMBO_FRAME |
-			  DEV_RX_OFFLOAD_KEEP_CRC    |
-			  DEV_RX_OFFLOAD_SCATTER     |
-			  DEV_RX_OFFLOAD_RSS_HASH;
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+			  RTE_ETH_RX_OFFLOAD_SCATTER     |
+			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == e1000_i350 ||
 	    hw->mac.type == e1000_i210 ||
 	    hw->mac.type == e1000_i211)
-		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	return rx_offload_capa;
 }
@@ -1729,7 +1729,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1963,23 +1963,23 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
@@ -2045,23 +2045,23 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -2183,15 +2183,15 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
 			E1000_VMOLR_MPME);
 
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 			vmolr |= E1000_VMOLR_AUPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 			vmolr |= E1000_VMOLR_ROMPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 			vmolr |= E1000_VMOLR_ROPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 			vmolr |= E1000_VMOLR_BAM;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 			vmolr |= E1000_VMOLR_MPME;
 
 		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
@@ -2228,7 +2228,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	for (i = 0; i < cfg->nb_pool_maps; i++) {
 		/* set vlan id in VF register and set the valid bit */
 		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
-                        (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
+                        (cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) | \
 			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
 			E1000_VLVF_POOLSEL_MASK)));
 	}
@@ -2281,7 +2281,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t mrqc;
 
-	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+	if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
 		/*
 		 * SRIOV active scheme
 		 * FIXME if support RSS together with VMDq & SRIOV
@@ -2295,14 +2295,14 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-			case ETH_MQ_RX_RSS:
+			case RTE_ETH_MQ_RX_RSS:
 				igb_rss_configure(dev);
 				break;
-			case ETH_MQ_RX_VMDQ_ONLY:
+			case RTE_ETH_MQ_RX_VMDQ_ONLY:
 				/* Configure general VMDQ only RX parameters */
 				igb_vmdq_rx_hw_configure(dev);
 				break;
-			case ETH_MQ_RX_NONE:
+			case RTE_ETH_MQ_RX_NONE:
 				/* if mq_mode is none, disable rss mode. */
 			default:
 				igb_rss_disable(dev);
@@ -2342,7 +2342,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 		rctl |= E1000_RCTL_LPE;
@@ -2351,7 +2351,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Set maximum packet length by default, and might be updated
 		 * together with enabling/disabling dual VLAN.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			max_len += VLAN_TAG_SIZE;
 
 		E1000_WRITE_REG(hw, E1000_RLPML, max_len);
@@ -2387,7 +2387,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -2458,7 +2458,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2502,16 +2502,16 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
 	if (rxmode->offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		rxcsum |= E1000_RXCSUM_TUOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_CRCOFL;
@@ -2519,7 +2519,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
 		/* clear STRCRC bit in all queues */
@@ -2559,7 +2559,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/* Make sure VLAN Filters are off. */
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
 		rctl &= ~E1000_RCTL_VFE;
 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
@@ -2758,7 +2758,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
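
Note: callers that tested the old capability bits do the same check with
the new names; an illustrative sketch, port_id assumed valid:

	struct rte_eth_dev_info dev_info;
	uint64_t want = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			RTE_ETH_RX_OFFLOAD_KEEP_CRC;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		rte_exit(EXIT_FAILURE, "cannot get device info\n");
	if ((dev_info.rx_offload_capa & want) != want)
		rte_exit(EXIT_FAILURE, "missing a requested Rx offload\n");
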
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 4cebf60a68a7..4e3ee72608f4 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -116,10 +116,10 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-			DEV_TX_OFFLOAD_UDP_CKSUM |\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
 		       PKT_TX_IP_CKSUM |\
 		       PKT_TX_TCP_SEG)
@@ -310,7 +310,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
 		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
@@ -318,7 +318,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L3 checksum is needed */
 		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -335,12 +335,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L4 checksum is needed */
 		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_UDP_CKSUM) &&
-				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else {
@@ -623,9 +623,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
-	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_NONE;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return 0;
 }
@@ -684,7 +684,7 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
 	uint32_t max_frame_len = adapter->max_mtu;
 
 	if (adapter->edev_data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_JUMBO_FRAME)
+	    RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		max_frame_len =
 			adapter->edev_data->dev_conf.rxmode.max_rx_pkt_len;
 
@@ -915,7 +915,7 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
@@ -1854,9 +1854,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
@@ -1907,36 +1907,36 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
-			ETH_LINK_SPEED_1G   |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_5G   |
-			ETH_LINK_SPEED_10G  |
-			ETH_LINK_SPEED_25G  |
-			ETH_LINK_SPEED_40G  |
-			ETH_LINK_SPEED_50G  |
-			ETH_LINK_SPEED_100G;
+			RTE_ETH_LINK_SPEED_1G   |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_5G   |
+			RTE_ETH_LINK_SPEED_10G  |
+			RTE_ETH_LINK_SPEED_25G  |
+			RTE_ETH_LINK_SPEED_40G  |
+			RTE_ETH_LINK_SPEED_50G  |
+			RTE_ETH_LINK_SPEED_100G;
 
 	/* Set Tx & Rx features available for device */
 	if (adapter->offloads.tso4_supported)
-		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
+		tx_feat	|= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (adapter->offloads.tx_csum_supported)
-		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+		tx_feat |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (adapter->offloads.rx_csum_supported)
-		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM  |
-			DEV_RX_OFFLOAD_TCP_CKSUM;
+		rx_feat |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
-	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	rx_feat |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+	tx_feat |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
 	if (adapter->offloads.rss_hash_supported)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
@@ -2100,7 +2100,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
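
Note: on the read side, link state consumers only see the renamed
constants. Minimal sketch:

	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: %u Mbps, %s\n", port_id, link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
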
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 06ac8b06b5cb..3b1844e50982 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -54,8 +54,8 @@
 
 #define ENA_HASH_KEY_SIZE		40
 
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define ENA_IO_TXQ_IDX(q)		(2 * (q))
 #define ENA_IO_RXQ_IDX(q)		(2 * (q) + 1)
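
Note: driver-private masks like ENA_ALL_RSS_HF build on either name set
because the backward-compatibility layer reduces to one-line aliases of
this shape (sketch only; the authoritative list is the rte_ethdev.h hunk
of this patch):

	/* transitional aliases, removable in a later LTS */
	#define ETH_RSS_NONFRAG_IPV4_TCP	RTE_ETH_RSS_NONFRAG_IPV4_TCP
	#define ETH_RSS_NONFRAG_IPV4_UDP	RTE_ETH_RSS_NONFRAG_IPV4_UDP
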
diff --git a/drivers/net/ena/ena_rss.c b/drivers/net/ena/ena_rss.c
index 88afe13da04d..3193faf1fa8c 100644
--- a/drivers/net/ena/ena_rss.c
+++ b/drivers/net/ena/ena_rss.c
@@ -76,7 +76,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -140,7 +140,7 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -200,34 +200,34 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Convert proto to ETH flag */
 	switch (proto) {
 	case ENA_ADMIN_RSS_TCP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		break;
 	case ENA_ADMIN_RSS_TCP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 		break;
 	case ENA_ADMIN_RSS_IP4:
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 		break;
 	case ENA_ADMIN_RSS_IP6:
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 		break;
 	case ENA_ADMIN_RSS_IP4_FRAG:
-		rss_hf |= ETH_RSS_FRAG_IPV4;
+		rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
 		break;
 	case ENA_ADMIN_RSS_NOT_IP:
-		rss_hf |= ETH_RSS_L2_PAYLOAD;
+		rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
 		break;
 	case ENA_ADMIN_RSS_TCP6_EX:
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 		break;
 	case ENA_ADMIN_RSS_IP6_EX:
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 		break;
 	default:
 		break;
@@ -236,10 +236,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L3. */
 	switch (fields & ENA_HF_RSS_ALL_L3) {
 	case ENA_ADMIN_RSS_L3_SA:
-		rss_hf |= ETH_RSS_L3_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L3_DA:
-		rss_hf |= ETH_RSS_L3_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
 		break;
 	default:
 		break;
@@ -248,10 +248,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L4. */
 	switch (fields & ENA_HF_RSS_ALL_L4) {
 	case ENA_ADMIN_RSS_L4_SP:
-		rss_hf |= ETH_RSS_L4_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L4_DP:
-		rss_hf |= ETH_RSS_L4_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
 		break;
 	default:
 		break;
@@ -269,11 +269,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
 	/* Determine which fields of L3 should be used. */
-	switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
-	case ETH_RSS_L3_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+	case RTE_ETH_RSS_L3_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_DA;
 		break;
-	case ETH_RSS_L3_SRC_ONLY:
+	case RTE_ETH_RSS_L3_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_SA;
 		break;
 	default:
@@ -285,11 +285,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	}
 
 	/* Determine which fields of L4 should be used. */
-	switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
-	case ETH_RSS_L4_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+	case RTE_ETH_RSS_L4_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_DP;
 		break;
-	case ETH_RSS_L4_SRC_ONLY:
+	case RTE_ETH_RSS_L4_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_SP;
 		break;
 	default:
@@ -335,43 +335,43 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
 	int rc, i;
 
 	/* Turn on appropriate fields for each requested packet type */
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
 
-	if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+	if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
 		selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
 
@@ -542,7 +542,7 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint16_t admin_hf;
 	static bool warn_once;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
 		return -ENOTSUP;
 	}
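
Note: the same RTE_ETH_RSS_* bits arrive from the application through
rte_eth_dev_rss_hash_update(); sketch with assumed values:

	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the current key */
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	};

	if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
		printf("RSS hash update failed\n");
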
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index b496cd470045..e0fb44edeb41 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -100,27 +100,27 @@ enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
 
 	if (status & ENETC_LINK_MODE)
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 
 	if (status & ENETC_LINK_STATUS)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	switch (status & ENETC_LINK_SPEED_MASK) {
 	case ENETC_LINK_SPEED_1G:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case ENETC_LINK_SPEED_100M:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	default:
 	case ENETC_LINK_SPEED_10M:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -207,11 +207,11 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	dev_info->max_tx_queues = MAX_TX_RINGS;
 	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
 	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		 DEV_RX_OFFLOAD_UDP_CKSUM |
-		 DEV_RX_OFFLOAD_TCP_CKSUM |
-		 DEV_RX_OFFLOAD_KEEP_CRC |
-		 DEV_RX_OFFLOAD_JUMBO_FRAME);
+		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
 
 	return 0;
 }
@@ -462,7 +462,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
 			       RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
-	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				     RTE_ETHER_CRC_LEN : 0);
 
 	return 0;
@@ -679,10 +679,10 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > ENETC_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads &=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
 	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
@@ -708,7 +708,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		uint32_t max_len;
 
 		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -723,7 +723,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 			RTE_ETHER_CRC_LEN;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		int config;
 
 		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
@@ -731,10 +731,10 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		checksum &= ~L3_CKSUM;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		checksum &= ~L4_CKSUM;
 
 	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
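
Note: application side, the renamed jumbo flag still gates
max_rx_pkt_len at configure time (semantics unchanged by this rename);
sketch with a placeholder frame size:

	struct rte_eth_conf conf = {
		.rxmode = {
			.offloads = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME,
			.max_rx_pkt_len = 9000,	/* placeholder */
		},
	};
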
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 8d5797523b8f..d4858326ed7a 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,30 +38,30 @@ static const struct vic_speed_capa {
 	uint16_t sub_devid;
 	uint32_t capa;
 } vic_speed_capa_map[] = {
-	{ 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-	{ 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-	{ 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-	{ 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-	{ 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-	{ 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-	{ 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-	{ 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-	{ 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-	{ 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-	{ 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-	{ 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-	{ 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-	{ 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-	{ 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-	{ 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-	{ 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-	{ 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-	{ 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-	{ 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-	{ 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-	{ 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
 	{ 0, 0 }, /* End marker */
 };
 
@@ -293,8 +293,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	ENICPMD_FUNC_TRACE();
 
 	offloads = eth_dev->data->dev_conf.rxmode.offloads;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			enic->ig_vlan_strip_en = 1;
 		else
 			enic->ig_vlan_strip_en = 0;
@@ -319,17 +319,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	enic->mc_count = 0;
 	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-				  DEV_RX_OFFLOAD_CHECKSUM);
+				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* All vlan offload masks to apply the current settings */
-	mask = ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = enicpmd_vlan_offload_set(eth_dev, mask);
 	if (ret) {
 		dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -431,14 +431,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
 	}
 	/* 1300 and later models are at least 40G */
 	if (id >= 0x0100)
-		return ETH_LINK_SPEED_40G;
+		return RTE_ETH_LINK_SPEED_40G;
 	/* VFs have subsystem id 0, check device id */
 	if (id == 0) {
 		/* Newer VF implies at least 40G model */
 		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-			return ETH_LINK_SPEED_40G;
+			return RTE_ETH_LINK_SPEED_40G;
 	}
-	return ETH_LINK_SPEED_10G;
+	return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -879,7 +879,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 	 */
 	conf->offloads = enic->rx_offload_capa;
 	if (!enic->ig_vlan_strip_en)
-		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -965,8 +965,8 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
 				   struct rte_eth_udp_tunnel *tnl)
 {
-	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
-	    tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
 		return -ENOTSUP;
 	if (!enic->overlay_offload) {
 		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
@@ -1006,7 +1006,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
@@ -1035,7 +1035,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
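
Note: udp_tunnel_common_check() above validates whatever the application
passed in; with the renamed enum the call site looks like this sketch,
4789 being the conventional VXLAN port:

	struct rte_eth_udp_tunnel tnl = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,	/* was RTE_TUNNEL_TYPE_VXLAN */
	};

	if (rte_eth_dev_udp_tunnel_port_add(port_id, &tnl) != 0)
		printf("cannot register VXLAN port\n");
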
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 2affd380c6a4..754cf362c6d8 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
 
 	memset(&link, 0, sizeof(link));
 	link.link_status = enic_get_link_status(enic);
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = vnic_dev_port_speed(enic->vdev);
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
 	}
 
 	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* vnic notification of link status has already been turned on in
 	 * enic_dev_init() which is called during probe time.  Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
 	 * and vlan insertion are supported.
 	 */
 	simple_tx_offloads = enic->tx_offload_capa &
-		(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_VLAN_INSERT |
-		 DEV_TX_OFFLOAD_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_UDP_CKSUM |
-		 DEV_TX_OFFLOAD_TCP_CKSUM);
+		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if ((eth_dev->data->dev_conf.txmode.offloads &
 	     ~simple_tx_offloads) == 0) {
 		ENICPMD_LOG(DEBUG, " use the simple tx handler");
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_SCATTER) {
+	    RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((max pkt len)/mbuf_size) */
 		mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
@@ -1386,15 +1386,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 	rss_hash_type = 0;
 	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
 	if (enic->rq_count > 1 &&
-	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+	    (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
 	    rss_hf != 0) {
 		rss_enable = 1;
-		if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			      ETH_RSS_NONFRAG_IPV4_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
 			if (enic->udp_rss_weak) {
 				/*
@@ -1405,12 +1405,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
 			}
 		}
-		if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
-			      ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+			      RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
 			if (enic->udp_rss_weak)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -1751,9 +1751,9 @@ enic_enable_overlay_offload(struct enic *enic)
 		return -EINVAL;
 	}
 	enic->tx_offload_capa |=
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		(enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
-		(enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+		(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
 	enic->tx_offload_mask |=
 		PKT_TX_OUTER_IPV6 |
 		PKT_TX_OUTER_IPV4 |
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index a8f5332a407f..12f734260ca5 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -147,31 +147,31 @@ int enic_get_vnic_config(struct enic *enic)
 		 * IPV4 hash type handles both non-frag and frag packet types.
 		 * TCP/UDP is controlled via a separate flag below.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
-			ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+			RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (ENIC_SETTING(enic, RSSHASH_IPV6))
 		/*
 		 * The VIC adapter can perform RSS on IPv6 packets with and
 		 * without extension headers. An IPv6 "fragment" is an IPv6
 		 * packet with the fragment extension header.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
-			ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+			RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
-			ETH_RSS_IPV6_TCP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_IPV6_TCP_EX;
 	if (enic->udp_rss_weak)
 		enic->flow_type_rss_offloads |=
-			ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 
 	/* Zero offloads if RSS is not enabled */
 	if (!ENIC_SETTING(enic, RSS))
@@ -201,20 +201,20 @@ int enic_get_vnic_config(struct enic *enic)
 	enic->tx_queue_offload_capa = 0;
 	enic->tx_offload_capa =
 		enic->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	enic->rx_offload_capa =
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	enic->tx_offload_mask =
 		PKT_TX_IPV6 |
 		PKT_TX_IPV4 |
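
Note: enic intersects the configured Tx offloads with its capability
mask above; applications can do the mirror-image clamp before
configuring, e.g. (sketch; port_conf is the rte_eth_conf being built):

	struct rte_eth_dev_info dev_info;
	uint64_t want = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			RTE_ETH_TX_OFFLOAD_TCP_TSO;

	if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
		port_conf.txmode.offloads = want & dev_info.tx_offload_capa;
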
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index 8216063a3d8b..9b22a6ce8941 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -17,10 +17,10 @@
 
 const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
 static const struct rte_eth_link eth_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_UP,
-	.link_autoneg = ETH_LINK_AUTONEG,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_UP,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG,
 };
 
 static int
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 602c04033c18..5f4810051dac 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -326,7 +326,7 @@ int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
 	int qid;
 	struct rte_eth_dev *fsdev;
 	struct rxq **rxq;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 				&ETH(sdev)->data->dev_conf.intr_conf;
 
 	fsdev = fs_dev(sdev);
@@ -519,7 +519,7 @@ int
 failsafe_rx_intr_install(struct rte_eth_dev *dev)
 {
 	struct fs_priv *priv = PRIV(dev);
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 			&priv->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
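
Note: the renamed struct rte_eth_intr_conf is what applications fill to
request these per-queue interrupts in the first place; minimal sketch:

	struct rte_eth_conf conf = {
		.intr_conf = {
			.rxq = 1,	/* request Rx queue interrupts */
		},
	};
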
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 5ff33e03e034..8cb215651df8 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1182,53 +1182,53 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
 	 * configuring a sub-device.
 	 */
 	infos->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->rx_queue_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	infos->flow_type_rss_offloads =
-		ETH_RSS_IP |
-		ETH_RSS_UDP |
-		ETH_RSS_TCP;
+		RTE_ETH_RSS_IP |
+		RTE_ETH_RSS_UDP |
+		RTE_ETH_RSS_TCP;
 	infos->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
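
Note: RTE_ETH_RSS_IP/UDP/TCP are composite masks covering the
per-packet-type bits, so a request built from them can be narrowed by
the advertised set; sketch, dev_info obtained as usual:

	uint64_t req = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP;

	req &= dev_info.flow_type_rss_offloads;	/* drop unsupported types */
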
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 916b856acc4b..7af115399e0f 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -177,7 +177,7 @@ struct fm10k_rx_queue {
 	uint8_t drop_en;
 	uint8_t rx_deferred_start; /* don't start this queue in dev start. */
 	uint16_t rx_ftag_en; /* indicates FTAG RX supported */
-	uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /*
@@ -209,7 +209,7 @@ struct fm10k_tx_queue {
 	uint16_t next_rs; /* Next pos to set RS flag */
 	uint16_t next_dd; /* Next pos to check DD flag */
 	volatile uint32_t *tail_ptr;
-	uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
 	uint16_t nb_desc;
 	uint16_t port_id;
 	uint8_t tx_deferred_start; /** don't start this queue in dev start. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 3236290e4021..b5935d714a37 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -413,12 +413,12 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
 
 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 
-	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		return 0;
 
 	if (hw->mac.type == fm10k_mac_vf) {
@@ -449,8 +449,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
@@ -510,7 +510,7 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};
 
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
 		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
@@ -547,15 +547,15 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 	 */
 	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	if (mrqc == 0) {
 		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
@@ -602,7 +602,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	if (hw->mac.type != fm10k_mac_pf)
 		return;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		nb_queue_pools = vmdq_conf->nb_queue_pools;
 
 	/* no pool number change, no need to update logic port and VLAN/MAC */
@@ -759,7 +759,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+			rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1145,7 +1145,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Update default vlan when not in VMDQ mode */
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
 	fm10k_link_update(dev, 0);
@@ -1222,11 +1222,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();
 
-	dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_speed  = RTE_ETH_SPEED_NUM_50G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	dev->data->dev_link.link_status =
-		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+		dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return 0;
 }
@@ -1378,7 +1378,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_vfs            = pdev->max_vfs;
 	dev_info->vmdq_pool_base     = 0;
 	dev_info->vmdq_queue_base    = 0;
-	dev_info->max_vmdq_pools     = ETH_32_POOLS;
+	dev_info->max_vmdq_pools     = RTE_ETH_32_POOLS;
 	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
 	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
@@ -1389,15 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
-	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-					ETH_RSS_IPV6 |
-					ETH_RSS_IPV6_EX |
-					ETH_RSS_NONFRAG_IPV4_TCP |
-					ETH_RSS_NONFRAG_IPV6_TCP |
-					ETH_RSS_IPV6_TCP_EX |
-					ETH_RSS_NONFRAG_IPV4_UDP |
-					ETH_RSS_NONFRAG_IPV6_UDP |
-					ETH_RSS_IPV6_UDP_EX;
+	dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+					RTE_ETH_RSS_IPV6 |
+					RTE_ETH_RSS_IPV6_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+					RTE_ETH_RSS_IPV6_TCP_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+					RTE_ETH_RSS_IPV6_UDP_EX;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1435,9 +1435,9 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+			RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1509,7 +1509,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		return -EINVAL;
 	}
 
-	if (vlan_id > ETH_VLAN_ID_MAX) {
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
 		return -EINVAL;
 	}
@@ -1767,21 +1767,21 @@ static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+	return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
 }
 
 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
-			   DEV_RX_OFFLOAD_VLAN_FILTER |
-			   DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			   DEV_RX_OFFLOAD_UDP_CKSUM   |
-			   DEV_RX_OFFLOAD_TCP_CKSUM   |
-			   DEV_RX_OFFLOAD_JUMBO_FRAME |
-			   DEV_RX_OFFLOAD_HEADER_SPLIT |
-			   DEV_RX_OFFLOAD_RSS_HASH);
+	return  (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH);
 }
 
 static int
@@ -1966,12 +1966,12 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_MULTI_SEGS  |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_TSO);
+	return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO);
 }
 
 static int
@@ -2199,15 +2199,15 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	/* If the mapping doesn't fit any supported type, return */
 	if (mrqc == 0)
@@ -2244,15 +2244,15 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
 	hf = 0;
-	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV4)     ? RTE_ETH_RSS_IPV4              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6_EX           : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX       : 0;
 
 	rss_conf->rss_hf = hf;
 
@@ -2607,7 +2607,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 
 			/* first clear the internal SW recording structure */
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					false);
 
@@ -2623,7 +2623,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 					MAIN_VSI_POOL_NUMBER);
 
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					true);
 
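For reference, once the namespace change is in, an application selecting RSS
would use the renamed flags roughly as below. A minimal sketch only, not part
of this patch; the chosen hash functions are arbitrary:

	#include <rte_ethdev.h>

	static const struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,	/* keep the PMD default key */
				.rss_hf = RTE_ETH_RSS_IPV4 |
					  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
					  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
			},
		},
	};
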
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 83af01dc2da6..50973a662c67 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -208,11 +208,11 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 	/* without rx ol_flags, no VP flag report */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 #endif
 
@@ -221,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c
index cb9cf6efa287..80f9eb5c3031 100644
--- a/drivers/net/hinic/base/hinic_pmd_hwdev.c
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -1320,28 +1320,28 @@ hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
 static int hinic_link_event_process(struct hinic_hwdev *hwdev,
 				    struct rte_eth_dev *eth_dev, u8 status)
 {
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 	struct nic_port_info port_info;
 	struct rte_eth_link link;
 	int rc = HINIC_OK;
 
 	if (!status) {
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
 		memset(&port_info, 0, sizeof(port_info));
 		rc = hinic_get_port_info(hwdev, &port_info);
 		if (rc) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
-			link.link_autoneg = ETH_LINK_FIXED;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+			link.link_autoneg = RTE_ETH_LINK_FIXED;
 		} else {
 			link.link_speed = port_speed[port_info.speed %
 						LINK_SPEED_MAX];
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index 1a7240154668..17f32692fb2d 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -311,8 +311,8 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* mtu size is 256~9600 */
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
@@ -338,7 +338,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 
 	/* init vlan offload */
 	err = hinic_vlan_offload_set(dev,
-				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+				RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
 		(void)hinic_config_mq_mode(dev, FALSE);
@@ -696,15 +696,15 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
 	} else {
 		*speed_capa = 0;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
-			*speed_capa |= ETH_LINK_SPEED_1G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_1G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
-			*speed_capa |= ETH_LINK_SPEED_10G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_10G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
-			*speed_capa |= ETH_LINK_SPEED_25G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_25G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
-			*speed_capa |= ETH_LINK_SPEED_40G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_40G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
-			*speed_capa |= ETH_LINK_SPEED_100G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	}
 }
 
@@ -732,25 +732,25 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 
 	hinic_get_speed_capa(dev, &info->speed_capa);
 	info->rx_queue_offload_capa = 0;
-	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_VLAN_FILTER |
-				DEV_RX_OFFLOAD_SCATTER |
-				DEV_RX_OFFLOAD_JUMBO_FRAME |
-				DEV_RX_OFFLOAD_TCP_LRO |
-				DEV_RX_OFFLOAD_RSS_HASH;
+	info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				RTE_ETH_RX_OFFLOAD_SCATTER |
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	info->tx_queue_offload_capa = 0;
-	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_UDP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_CKSUM |
-				DEV_TX_OFFLOAD_SCTP_CKSUM |
-				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_TCP_TSO |
-				DEV_TX_OFFLOAD_MULTI_SEGS;
+	info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
 	info->reta_size = HINIC_RSS_INDIR_SIZE;
@@ -847,20 +847,20 @@ static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
 	u8 port_link_status = 0;
 	struct nic_port_info port_link_info;
 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 
 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
 	if (rc)
 		return rc;
 
 	if (!port_link_status) {
-		link->link_status = ETH_LINK_DOWN;
+		link->link_status = RTE_ETH_LINK_DOWN;
 		link->link_speed = 0;
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
-		link->link_autoneg = ETH_LINK_FIXED;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_FIXED;
 		return HINIC_OK;
 	}
 
@@ -902,8 +902,8 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* Get link status information from hardware */
 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
 		if (rc != HINIC_OK) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Get link status failed");
 			goto out;
 		}
@@ -1552,10 +1552,10 @@ static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	frame_size = HINIC_MTU_TO_PKTLEN(mtu);
 	if (frame_size > HINIC_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 	nic_dev->mtu_size = mtu;
@@ -1664,8 +1664,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	int err;
 
 	/* Enable or disable VLAN filter */
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
 			TRUE : FALSE;
 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
@@ -1686,8 +1686,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Enable or disable VLAN stripping */
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
 			TRUE : FALSE;
 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
 		if (err) {
@@ -1873,13 +1873,13 @@ static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
 	fc_conf->autoneg = nic_pause.auto_neg;
 
 	if (nic_pause.tx_pause && nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (nic_pause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else if (nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1893,14 +1893,14 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	nic_pause.auto_neg = fc_conf->autoneg;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		nic_pause.tx_pause = true;
 	else
 		nic_pause.tx_pause = false;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		nic_pause.rx_pause = true;
 	else
 		nic_pause.rx_pause = false;
@@ -1944,7 +1944,7 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err = 0;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_OK;
 	}
@@ -1965,14 +1965,14 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 		}
 	}
 
-	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 
 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
 	if (err) {
@@ -2008,7 +2008,7 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_ERROR;
 	}
@@ -2029,15 +2029,15 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	rss_conf->rss_hf |=  rss_type.ipv4 ?
-		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
 	rss_conf->rss_hf |=  rss_type.ipv6 ?
-		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
-	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+	rss_conf->rss_hf |=  rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
 
 	return HINIC_OK;
 }
@@ -2067,7 +2067,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 	u16 i = 0;
 	u16 idx, shift;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
 		return HINIC_OK;
 
 	if (reta_size != NIC_RSS_INDIR_SIZE) {
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 842399cc4cd8..d347afe9a6a9 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -504,14 +504,14 @@ static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
 {
 	u64 rss_hf = rss_conf->rss_hf;
 
-	rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 }
 
 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
@@ -588,8 +588,8 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 {
 	int err, i;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 		nic_dev->num_rss = 0;
 		if (nic_dev->num_rq > 1) {
 			/* get rss template id */
@@ -599,7 +599,7 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 				PMD_DRV_LOG(WARNING, "Alloc rss template failed");
 				return err;
 			}
-			nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+			nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
 			for (i = 0; i < nic_dev->num_rq; i++)
 				hinic_add_rq_to_rx_queue_list(nic_dev, i);
 		}
@@ -610,12 +610,12 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 
 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
 {
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (hinic_rss_template_free(nic_dev->hwdev,
 					    nic_dev->rss_tmpl_idx))
 			PMD_DRV_LOG(WARNING, "Free rss template failed");
 
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 	}
 }
 
@@ -641,7 +641,7 @@ int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
 	int ret = 0;
 
 	switch (dev_conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		ret = hinic_config_mq_rx_rss(nic_dev, on);
 		break;
 	default:
@@ -662,7 +662,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	int lro_wqe_num;
 	int buf_size;
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (rss_conf.rss_hf == 0) {
 			rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
 		} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
@@ -678,7 +678,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
 	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
@@ -687,7 +687,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 		goto rx_csum_ofl_err;
 
 	/* config lro */
-	lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+	lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
 			true : false;
 	max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
 	buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
@@ -726,7 +726,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		hinic_rss_deinit(nic_dev);
 		hinic_destroy_num_qps(nic_dev);
 	}
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 8a45f2d9fc50..5c303398b635 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -8,17 +8,17 @@
 #define HINIC_DEFAULT_RX_FREE_THRESH	32
 
 #define HINIC_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |\
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 enum rq_completion_fmt {
 	RQ_COMPLETE_SGE = 1
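
As a usage sketch under the new names (port_id is assumed to come from the
application), an Rx offload should only be requested after checking the
capability reported by the PMD:

	#include <rte_ethdev.h>

	static int
	enable_lro_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
	{
		struct rte_eth_dev_info dev_info;
		int ret;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		/* request LRO only when the port advertises it */
		if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
			conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

		return 0;
	}
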
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index b71e2e9ea451..953c146d0200 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -1536,7 +1536,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 		if (dcb_rx_conf->nb_tcs == 0)
 			hw->dcb_info.pfc_en = 1; /* tc0 only */
@@ -1693,7 +1693,7 @@ hns3_update_queue_map_configure(struct hns3_adapter *hns)
 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		return 0;
 
 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
@@ -1713,22 +1713,22 @@ static void
 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
 {
 	switch (mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->requested_fc_mode = HNS3_FC_FULL;
 		break;
 	default:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
-			  "configured to RTE_FC_NONE", mode);
+			  "configured to RTE_ETH_FC_NONE", mode);
 		break;
 	}
 }
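
The renamed RTE_ETH_FC_* values map one-to-one to the old RTE_FC_* ones, so
application updates are mechanical. An illustrative read-modify-write of the
flow control mode (port_id assumed application-defined):

	#include <rte_ethdev.h>

	static int
	set_fc_full(uint16_t port_id)
	{
		struct rte_eth_fc_conf fc_conf = { 0 };
		int ret;

		ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
		if (ret != 0)
			return ret;

		fc_conf.mode = RTE_ETH_FC_FULL;	/* pause in both directions */
		return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}
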
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 7d37004972bf..64d1da09a707 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -60,29 +60,29 @@ enum hns3_evt_cause {
 };
 
 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
-	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
 };
@@ -500,8 +500,8 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	struct hns3_cmd_desc desc;
 	int ret;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
 		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
 		return -EINVAL;
 	}
@@ -514,10 +514,10 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
 	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
@@ -725,11 +725,11 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rte_spinlock_lock(&hw->lock);
 	rxmode = &dev->data->dev_conf.rxmode;
 	tmp_mask = (unsigned int)mask;
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* ignore vlan filter configuration during promiscuous mode */
 		if (!dev->data->promiscuous) {
 			/* Enable or disable VLAN filter */
-			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
 				 true : false;
 
 			ret = hns3_enable_vlan_filter(hns, enable);
@@ -742,9 +742,9 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
 		    true : false;
 
 		ret = hns3_en_hw_strip_rxvtag(hns, enable);
@@ -1118,7 +1118,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
 				       RTE_ETHER_TYPE_VLAN);
 	if (ret) {
 		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
@@ -1161,7 +1161,7 @@ hns3_restore_vlan_conf(struct hns3_adapter *hns)
 	if (!hw->data->promiscuous) {
 		/* restore vlan filter states */
 		offloads = hw->data->dev_conf.rxmode.offloads;
-		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
 		ret = hns3_enable_vlan_filter(hns, enable);
 		if (ret) {
 			hns3_err(hw, "failed to restore vlan rx filter conf, "
@@ -1204,7 +1204,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
 			  txmode->hw_vlan_reject_untagged);
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	ret = hns3_vlan_offload_set(dev, mask);
 	if (ret) {
 		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
@@ -2218,9 +2218,9 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 	int max_tc = 0;
 	int i;
 
-	if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
-	    (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
-	     tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
 		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
 			 rx_mq_mode, tx_mq_mode);
 		return -EOPNOTSUPP;
@@ -2228,7 +2228,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
 			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
 				 dcb_rx_conf->nb_tcs, pf->tc_max);
@@ -2237,7 +2237,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
 		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
-			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
 				 "nb_tcs(%d) != %d or %d in rx direction.",
 				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
 			return -EINVAL;
@@ -2380,7 +2380,7 @@ hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
 	uint16_t mtu;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME))
 		return 0;
 
 	/*
@@ -2440,11 +2440,11 @@ hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
 	 * configure link_speeds (default 0), which means auto-negotiation.
 	 * In this case, it should return success.
 	 */
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
 	    hw->mac.support_autoneg == 0)
 		return 0;
 
-	if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
 		ret = hns3_check_port_speed(hw, link_speeds);
 		if (ret)
 			return ret;
@@ -2504,15 +2504,15 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		goto cfg_err;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_setup_dcb(dev);
 		if (ret)
 			goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		hw->rss_dis_flag = false;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -2533,7 +2533,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -2633,10 +2633,10 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (is_jumbo_frame)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 	rte_spinlock_unlock(&hw->lock);
 
@@ -2649,15 +2649,15 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 
 	return speed_capa;
 }
@@ -2668,19 +2668,19 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	return speed_capa;
 }
@@ -2699,7 +2699,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)
 			hns3_get_firber_port_speed_capa(mac->supported_speed);
 
 	if (mac->support_autoneg == 0)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -2725,41 +2725,41 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_KEEP_CRC |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_JUMBO_FRAME |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_outer_udp_cksum_supported(hw))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_indep_txrx_supported(hw))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
 	if (hns3_dev_ptp_supported(hw))
-		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
@@ -2843,7 +2843,7 @@ hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
 
 	ret = hns3_update_link_info(eth_dev);
 	if (ret)
-		hw->mac.link_status = ETH_LINK_DOWN;
+		hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	return ret;
 }
@@ -2856,29 +2856,29 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link->link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link->link_speed = ETH_SPEED_NUM_NONE;
+		new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link->link_duplex = mac->link_duplex;
-	new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link->link_autoneg = mac->link_autoneg;
 }
 
@@ -2898,8 +2898,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 	if (eth_dev->data->dev_started == 0) {
 		new_link.link_autoneg = mac->link_autoneg;
 		new_link.link_duplex = mac->link_duplex;
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
-		new_link.link_status = ETH_LINK_DOWN;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		new_link.link_status = RTE_ETH_LINK_DOWN;
 		goto out;
 	}
 
@@ -2911,7 +2911,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 			break;
 		}
 
-		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+		if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
 			break;
 
 		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
@@ -3257,31 +3257,31 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
 {
 	switch (speed_cmd) {
 	case HNS3_CFG_SPEED_10M:
-		*speed = ETH_SPEED_NUM_10M;
+		*speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case HNS3_CFG_SPEED_100M:
-		*speed = ETH_SPEED_NUM_100M;
+		*speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HNS3_CFG_SPEED_1G:
-		*speed = ETH_SPEED_NUM_1G;
+		*speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HNS3_CFG_SPEED_10G:
-		*speed = ETH_SPEED_NUM_10G;
+		*speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HNS3_CFG_SPEED_25G:
-		*speed = ETH_SPEED_NUM_25G;
+		*speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HNS3_CFG_SPEED_40G:
-		*speed = ETH_SPEED_NUM_40G;
+		*speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HNS3_CFG_SPEED_50G:
-		*speed = ETH_SPEED_NUM_50G;
+		*speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HNS3_CFG_SPEED_100G:
-		*speed = ETH_SPEED_NUM_100G;
+		*speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HNS3_CFG_SPEED_200G:
-		*speed = ETH_SPEED_NUM_200G;
+		*speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	default:
 		return -EINVAL;
@@ -3610,39 +3610,39 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
 	hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
 
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
 		break;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
 		break;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
 		break;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
 		break;
@@ -4305,14 +4305,14 @@ hns3_mac_init(struct hns3_hw *hw)
 	int ret;
 
 	pf->support_sfp_query = true;
-	mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+	mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
 		return ret;
 	}
 
-	mac->link_status = ETH_LINK_DOWN;
+	mac->link_status = RTE_ETH_LINK_DOWN;
 
 	return hns3_config_mtu(hw, pf->mps);
 }
@@ -4562,7 +4562,7 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	 * all packets coming in the receiving direction.
 	 */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, false);
 		if (ret) {
 			hns3_err(hw, "failed to enable promiscuous mode due to "
@@ -4603,7 +4603,7 @@ hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	}
 	/* when promiscuous mode was disabled, restore the vlan filter status */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, true);
 		if (ret) {
 			hns3_err(hw, "failed to disable promiscuous mode due to"
@@ -4723,8 +4723,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 		mac_info->supported_speed =
 					rte_le_to_cpu_32(resp->supported_speed);
 		mac_info->support_autoneg = resp->autoneg_ability;
-		mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
-					: ETH_LINK_AUTONEG;
+		mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+					: RTE_ETH_LINK_AUTONEG;
 	} else {
 		mac_info->query_type = HNS3_DEFAULT_QUERY;
 	}
@@ -4735,8 +4735,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 static uint8_t
 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
 {
-	if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
-		duplex = ETH_LINK_FULL_DUPLEX;
+	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return duplex;
 }
@@ -4786,7 +4786,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 		return ret;
 
 	/* Do nothing if no SFP */
-	if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
 		return 0;
 
 	/*
@@ -4813,7 +4813,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 
 	/* Config full duplex for SFP */
 	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
-				      ETH_LINK_FULL_DUPLEX);
+				      RTE_ETH_LINK_FULL_DUPLEX);
 }
 
 static void
@@ -4932,10 +4932,10 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
 	hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
 
 	/*
-	 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+	 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
 	 * when receiving frames. Otherwise, CRC will be stripped.
 	 */
-	if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
 	else
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
@@ -4963,7 +4963,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret) {
 		hns3_err(hw, "get link status cmd failed %d", ret);
-		return ETH_LINK_DOWN;
+		return RTE_ETH_LINK_DOWN;
 	}
 
 	req = (struct hns3_link_status_cmd *)desc.data;
@@ -5145,19 +5145,19 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		return HNS3_FIBER_LINK_SPEED_1G_BIT;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		return HNS3_FIBER_LINK_SPEED_10G_BIT;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		return HNS3_FIBER_LINK_SPEED_25G_BIT;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		return HNS3_FIBER_LINK_SPEED_40G_BIT;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		return HNS3_FIBER_LINK_SPEED_50G_BIT;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		return HNS3_FIBER_LINK_SPEED_100G_BIT;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		return HNS3_FIBER_LINK_SPEED_200G_BIT;
 	default:
 		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
@@ -5395,20 +5395,20 @@ hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_10M:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_10M:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
 		break;
-	case ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
 		break;
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
 		break;
 	default:
@@ -5424,26 +5424,26 @@ hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_1G:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
 		break;
 	default:
@@ -5478,28 +5478,28 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
 static inline uint32_t
 hns3_get_link_speed(uint32_t link_speeds)
 {
-	uint32_t speed = ETH_SPEED_NUM_NONE;
-
-	if (link_speeds & ETH_LINK_SPEED_10M ||
-	    link_speeds & ETH_LINK_SPEED_10M_HD)
-		speed = ETH_SPEED_NUM_10M;
-	if (link_speeds & ETH_LINK_SPEED_100M ||
-	    link_speeds & ETH_LINK_SPEED_100M_HD)
-		speed = ETH_SPEED_NUM_100M;
-	if (link_speeds & ETH_LINK_SPEED_1G)
-		speed = ETH_SPEED_NUM_1G;
-	if (link_speeds & ETH_LINK_SPEED_10G)
-		speed = ETH_SPEED_NUM_10G;
-	if (link_speeds & ETH_LINK_SPEED_25G)
-		speed = ETH_SPEED_NUM_25G;
-	if (link_speeds & ETH_LINK_SPEED_40G)
-		speed = ETH_SPEED_NUM_40G;
-	if (link_speeds & ETH_LINK_SPEED_50G)
-		speed = ETH_SPEED_NUM_50G;
-	if (link_speeds & ETH_LINK_SPEED_100G)
-		speed = ETH_SPEED_NUM_100G;
-	if (link_speeds & ETH_LINK_SPEED_200G)
-		speed = ETH_SPEED_NUM_200G;
+	uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+	if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+		speed = RTE_ETH_SPEED_NUM_10M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+		speed = RTE_ETH_SPEED_NUM_100M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+		speed = RTE_ETH_SPEED_NUM_1G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+		speed = RTE_ETH_SPEED_NUM_10G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+		speed = RTE_ETH_SPEED_NUM_25G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+		speed = RTE_ETH_SPEED_NUM_40G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+		speed = RTE_ETH_SPEED_NUM_50G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+		speed = RTE_ETH_SPEED_NUM_100G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+		speed = RTE_ETH_SPEED_NUM_200G;
 
 	return speed;
 }
@@ -5507,11 +5507,11 @@ hns3_get_link_speed(uint32_t link_speeds)
 static uint8_t
 hns3_get_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-	    (link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+	    (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 static int
@@ -5645,9 +5645,9 @@ hns3_apply_link_speed(struct hns3_hw *hw)
 	struct hns3_set_link_speed_cfg cfg;
 
 	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
-	cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
-			ETH_LINK_AUTONEG : ETH_LINK_FIXED;
-	if (cfg.autoneg != ETH_LINK_AUTONEG) {
+	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+			RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
 		cfg.speed = hns3_get_link_speed(conf->link_speeds);
 		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
 	}
@@ -5920,7 +5920,7 @@ hns3_do_stop(struct hns3_adapter *hns)
 	ret = hns3_cfg_mac_mode(hw, false);
 	if (ret)
 		return ret;
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
@@ -6131,17 +6131,17 @@ hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	current_mode = hns3_get_current_fc_mode(dev);
 	switch (current_mode) {
 	case HNS3_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case HNS3_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HNS3_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case HNS3_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	}
 
@@ -6287,7 +6287,7 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	int i;
 
 	rte_spinlock_lock(&hw->lock);
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = pf->local_max_tc;
 	else
 		dcb_info->nb_tcs = 1;
@@ -6587,7 +6587,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 		hns3_update_linkstatus_and_event(hw, false);
@@ -6877,7 +6877,7 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
 	 * on devices whose link speed is
 	 * below 10 Gbps.
 	 */
-	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
 		*state = 0;
 		return 0;
 	}
@@ -6909,7 +6909,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
 	 * configured FEC mode is returned.
 	 * If link is up, current FEC mode is returned.
 	 */
-	if (hw->mac.link_status == ETH_LINK_DOWN) {
+	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
 		ret = get_current_fec_auto_state(hw, &auto_state);
 		if (ret)
 			return ret;
@@ -7008,12 +7008,12 @@ get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
 	uint32_t cur_capa;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		cur_capa = fec_capa[1].capa;
 		break;
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		cur_capa = fec_capa[0].capa;
 		break;
 	default:
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 0e4e4269a12f..c40d28af1d46 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -191,10 +191,10 @@ struct hns3_mac {
 	bool default_addr_setted; /* whether default addr(mac_addr) is set */
 	uint8_t media_type;
 	uint8_t phy_addr;
-	uint8_t link_duplex  : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
-	uint8_t link_status  : 1; /* ETH_LINK_[DOWN/UP] */
-	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
+	uint8_t link_duplex  : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint8_t link_status  : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;      /* RTE_ETH_SPEED_NUM_ */
 	/*
 	 * Some firmware versions support only the SFP speed query. In addition
 	 * to the SFP speed query, some firmware supports the query of the speed
@@ -1114,9 +1114,9 @@ static inline uint64_t
 hns3_txvlan_cap_get(struct hns3_hw *hw)
 {
 	if (hw->port_base_vlan_cfg.state)
-		return DEV_TX_OFFLOAD_VLAN_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	else
-		return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 }
 
 #endif /* _HNS3_ETHDEV_H_ */
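
For readers following the rename: the capability bits keep their
semantics, only the names change. A minimal sketch (helper name and
flow are illustrative, not part of this patch) of an application
consulting the advertised Tx capabilities before requesting the insert
offloads returned above:

#include <errno.h>
#include <rte_ethdev.h>

static int
request_qinq_insert(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;
	int ret = rte_eth_dev_info_get(port_id, &info);

	if (ret != 0)
		return ret;
	/* Prefer QinQ insertion, fall back to plain VLAN insertion. */
	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
	else if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
	else
		return -ENOTSUP;
	return 0;
}
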
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 8d9b7979c806..53d79bb2106c 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -809,15 +809,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	hw->adapter_state = HNS3_NIC_CONFIGURING;
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
 		ret = -EINVAL;
 		goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -829,7 +829,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	 * If jumbo frames are enabled, MTU needs to be refreshed
 	 * according to the maximum RX packet length.
 	 */
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
 		if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
 		    max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
@@ -853,7 +853,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -931,10 +931,10 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 	if (mtu > RTE_ETHER_MTU)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 	rte_spinlock_unlock(&hw->lock);
 
@@ -963,33 +963,33 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_JUMBO_FRAME |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_outer_udp_cksum_supported(hw))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_indep_txrx_supported(hw))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1669,10 +1669,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	tmp_mask = (unsigned int)mask;
 
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN filter */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = hns3vf_en_vlan_filter(hw, true);
 		else
 			ret = hns3vf_en_vlan_filter(hw, false);
@@ -1682,10 +1682,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Vlan stripping setting */
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
 		else
 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1753,7 +1753,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
 	int ret;
 
 	dev_conf = &hw->data->dev_conf;
-	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
 								   : false;
 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
 	if (ret)
@@ -1778,8 +1778,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
 	}
 
 	/* Apply vlan offload setting */
-	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK);
+	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
 
@@ -2088,7 +2088,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	/*
 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2247,31 +2247,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link.link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link.link_duplex = mac->link_duplex;
-	new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -2599,11 +2599,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 		 * Make sure to update the link status before hns3vf_stop_poll_job
 		 * because updating the link status depends on the polling job.
 		 */
-		hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
 					  hw->mac.link_duplex);
 		hns3vf_stop_poll_job(eth_dev);
 	}
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
 	rte_wmb();
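
The VF link-update hunks above all follow the same reporting pattern.
A condensed sketch of that pattern with the new names, assuming the
driver-internal rte_eth_linkstatus_set() helper from ethdev_driver.h;
the function itself is illustrative:

#include <stdbool.h>
#include <string.h>
#include <ethdev_driver.h>

static void
report_link(struct rte_eth_dev *dev, bool up, uint32_t speed_num)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	/* speed_num is an RTE_ETH_SPEED_NUM_* value in Mbps. */
	link.link_speed = up ? speed_num : RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      RTE_ETH_LINK_SPEED_FIXED);
	rte_eth_linkstatus_set(dev, &link);
}
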
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index fc77979c5f14..0ac8705b590b 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1298,10 +1298,10 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
 	 * Kunpeng930 and future kunpeng series support to use src/dst port
 	 * fields to RSS hash for IPv6 SCTP packet type.
 	 */
-	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	    (rss->types & ETH_RSS_IP ||
+	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+	    (rss->types & RTE_ETH_RSS_IP ||
 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
-	    rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 		return false;
 
 	return true;
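
An illustrative, simplified helper (not driver code) capturing the
first half of the check above: L4 port-only selectors are rejected
when combined with IP-only RSS types:

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

static bool
l4_only_bits_valid(uint64_t types)
{
	/* No L4 port-only selector requested: nothing to validate. */
	if (!(types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)))
		return true;
	/* L4 port hashing is meaningless for IP-only RSS types. */
	return !(types & RTE_ETH_RSS_IP);
}
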
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
index df8485904688..395590c86c03 100644
--- a/drivers/net/hns3/hns3_ptp.c
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -21,7 +21,7 @@ hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		return 0;
 
 	ret = rte_mbuf_dyn_rx_timestamp_register
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index 3a81e90e0911..2c5661567945 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -76,69 +76,69 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_tuple_table[] = {
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
 };
 
@@ -146,44 +146,44 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_rss_types[] = {
-	{ ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+	{ RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+	{ RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
 };
@@ -365,10 +365,10 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
 	 * When the user does not specify any of the following types or a
 	 * combination of them, all fields are enabled for the supported
 	 * RSS types. The following types are:
-	 * - ETH_RSS_L3_SRC_ONLY
-	 * - ETH_RSS_L3_DST_ONLY
-	 * - ETH_RSS_L4_SRC_ONLY
-	 * - ETH_RSS_L4_DST_ONLY
+	 * - RTE_ETH_RSS_L3_SRC_ONLY
+	 * - RTE_ETH_RSS_L3_DST_ONLY
+	 * - RTE_ETH_RSS_L4_SRC_ONLY
+	 * - RTE_ETH_RSS_L4_DST_ONLY
 	 */
 	if (fields_count == 0) {
 		for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
@@ -692,7 +692,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	}
 
 	/* When RSS is off, redirect the packet queue 0 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
 		hns3_rss_uninit(hns);
 
 	/* Configure RSS hash algorithm and hash key offset */
@@ -709,7 +709,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	 * When RSS is off, it doesn't need to configure rss redirection table
 	 * to hardware.
 	 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
 					       hw->rss_ind_tbl_size);
 		if (ret)
@@ -723,7 +723,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	return ret;
 
 rss_indir_table_uninit:
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret1 = hns3_rss_reset_indir_table(hw);
 		if (ret1 != 0)
 			return ret;
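
For context, tables such as hns3_set_tuple_table[] above are consumed
by OR-ing together the hardware field bits of every entry whose
RSS-type mask is fully requested. A minimal sketch of that lookup
(struct and function names are made up for illustration):

#include <stddef.h>
#include <stdint.h>

struct tuple_map {
	uint64_t rss_types;	/* RTE_ETH_RSS_* request bits */
	uint64_t rss_field;	/* hardware tuple-enable bits */
};

static uint64_t
rss_types_to_fields(const struct tuple_map *tbl, size_t n, uint64_t rss_hf)
{
	uint64_t fields = 0;
	size_t i;

	for (i = 0; i < n; i++)
		if ((rss_hf & tbl[i].rss_types) == tbl[i].rss_types)
			fields |= tbl[i].rss_field;
	return fields;
}
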
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 996083b88b25..6f153a1b7bfb 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -8,20 +8,20 @@
 #include <rte_flow.h>
 
 #define HNS3_ETH_RSS_SUPPORT ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L3_SRC_ONLY | \
-	ETH_RSS_L3_DST_ONLY | \
-	ETH_RSS_L4_SRC_ONLY | \
-	ETH_RSS_L4_DST_ONLY)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L3_SRC_ONLY | \
+	RTE_ETH_RSS_L3_DST_ONLY | \
+	RTE_ETH_RSS_L4_SRC_ONLY | \
+	RTE_ETH_RSS_L4_DST_ONLY)
 
 #define HNS3_RSS_IND_TBL_SIZE	512 /* The size of hash lookup table */
 #define HNS3_RSS_IND_TBL_SIZE_MAX 2048
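
HNS3_ETH_RSS_SUPPORT is a validation mask; the usual check against it
is a one-liner (illustrative, not driver code):

#include <errno.h>
#include <stdint.h>

static int
check_rss_types(uint64_t requested, uint64_t supported)
{
	/* Reject any requested bit outside the support mask. */
	return (requested & ~supported) ? -ENOTSUP : 0;
}

e.g. check_rss_types(rss_conf->rss_hf, HNS3_ETH_RSS_SUPPORT).
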
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 0f222b37f9d1..01e43791572b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1912,7 +1912,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* CRC len set here is used for amending packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1957,7 +1957,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
 						 rxq->rx_buf_len);
 	}
 
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 	    dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
 		dev->data->scattered_rx = true;
 }
@@ -2833,7 +2833,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = !dev->data->scattered_rx &&
-			 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+			 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
 
 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_recv_pkts_vec;
@@ -3127,7 +3127,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
 	int ret;
 
 	offloads = hw->data->dev_conf.rxmode.offloads;
-	gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
@@ -4279,7 +4279,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	if (hns3_dev_ptp_supported(hw))
 		return false;
 
-	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
 }
 
 static bool
@@ -4291,16 +4291,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 	return true;
 #else
 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
-		DEV_TX_OFFLOAD_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_CKSUM | \
-		DEV_TX_OFFLOAD_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_SCTP_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
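
HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK above decides whether the simple Tx
path may skip rte_eth_tx_prepare(). A reduced sketch of that decision
(mask trimmed for brevity, illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

static bool
tx_prepare_needed(uint64_t tx_offloads)
{
	/* Any checksum or TSO offload requires the prepare stage. */
	const uint64_t mask = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			      RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			      RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			      RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
			      RTE_ETH_TX_OFFLOAD_TCP_TSO;

	return (tx_offloads & mask) != 0;
}
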
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index cd7c21c1d0c8..2fa3a01dd3bf 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -307,7 +307,7 @@ struct hns3_rx_queue {
 	uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
 	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */
 
-	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+	/* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
 	/*
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index 844512f6ceec..d01a8d62bfb1 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -22,8 +22,8 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	if (hns3_dev_ptp_supported(hw))
 		return -ENOTSUP;
 
-	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
-	if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	/* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		return -ENOTSUP;
 
 	return 0;
@@ -228,10 +228,10 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
 int
 hns3_rx_check_vec_support(struct rte_eth_dev *dev)
 {
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
-				 DEV_RX_OFFLOAD_VLAN;
+	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				 RTE_ETH_RX_OFFLOAD_VLAN;
 
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	if (hns3_dev_ptp_supported(hw))
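
The vector Rx check above follows the same shape: an exclusion mask of
offloads the vector routines cannot honour. Illustrative restatement:

#include <errno.h>
#include <stdint.h>
#include <rte_ethdev.h>

static int
rx_vec_allowed(uint64_t rx_offloads)
{
	/* LRO and the VLAN offload group force the scalar Rx path. */
	uint64_t excl = RTE_ETH_RX_OFFLOAD_TCP_LRO | RTE_ETH_RX_OFFLOAD_VLAN;

	return (rx_offloads & excl) ? -ENOTSUP : 0;
}
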
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7b230e2ed17a..0d9ebf208614 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1641,7 +1641,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 
 	/* Set the global registers with default ether type value */
 	if (!pf->support_multi_driver) {
-		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					 RTE_ETHER_TYPE_VLAN);
 		if (ret != I40E_SUCCESS) {
 			PMD_INIT_LOG(ERR,
@@ -1909,8 +1909,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Only legacy filter API needs the following fdir config. So when the
 	 * legacy filter API is deprecated, the following codes should also be
@@ -1944,13 +1944,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	 *  number, which will be available after rx_queue_setup(). dev_start()
 	 *  function is good to place RSS setup.
 	 */
-	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
 		ret = i40e_vmdq_setup(dev);
 		if (ret)
 			goto err;
 	}
 
-	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = i40e_dcb_setup(dev);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
@@ -2227,17 +2227,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 {
 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
 
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed |= I40E_LINK_SPEED_40GB;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed |= I40E_LINK_SPEED_25GB;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed |= I40E_LINK_SPEED_20GB;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed |= I40E_LINK_SPEED_10GB;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed |= I40E_LINK_SPEED_1GB;
-	if (link_speeds & ETH_LINK_SPEED_100M)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 		link_speed |= I40E_LINK_SPEED_100MB;
 
 	return link_speed;
@@ -2345,13 +2345,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
 		     I40E_AQ_PHY_LINK_ENABLED;
 
-	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
-		conf->link_speeds = ETH_LINK_SPEED_40G |
-				    ETH_LINK_SPEED_25G |
-				    ETH_LINK_SPEED_20G |
-				    ETH_LINK_SPEED_10G |
-				    ETH_LINK_SPEED_1G |
-				    ETH_LINK_SPEED_100M;
+	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+				    RTE_ETH_LINK_SPEED_25G |
+				    RTE_ETH_LINK_SPEED_20G |
+				    RTE_ETH_LINK_SPEED_10G |
+				    RTE_ETH_LINK_SPEED_1G |
+				    RTE_ETH_LINK_SPEED_100M;
 
 		abilities |= I40E_AQ_PHY_AN_ENABLED;
 	} else {
@@ -2910,34 +2910,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	/* Parse the link status */
 	switch (link_speed) {
 	case I40E_REG_SPEED_0:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_REG_SPEED_1:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_REG_SPEED_2:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_REG_SPEED_3:
 		if (hw->mac.type == I40E_MAC_X722) {
-			link->link_speed = ETH_SPEED_NUM_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		} else {
 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
 
 			if (reg_val & I40E_REG_MACC_25GB)
-				link->link_speed = ETH_SPEED_NUM_25G;
+				link->link_speed = RTE_ETH_SPEED_NUM_25G;
 			else
-				link->link_speed = ETH_SPEED_NUM_40G;
+				link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		}
 		break;
 	case I40E_REG_SPEED_4:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
-			link->link_speed = ETH_SPEED_NUM_20G;
+			link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2964,8 +2964,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 		status = i40e_aq_get_link_info(hw, enable_lse,
 						&link_status, NULL);
 		if (unlikely(status != I40E_SUCCESS)) {
-			link->link_speed = ETH_SPEED_NUM_NONE;
-			link->link_duplex = ETH_LINK_FULL_DUPLEX;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			return;
 		}
@@ -2980,28 +2980,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		link->link_speed = ETH_SPEED_NUM_20G;
+		link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (link->link_status)
-			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			link->link_speed = ETH_SPEED_NUM_NONE;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 }
@@ -3018,9 +3018,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	memset(&link, 0, sizeof(link));
 
 	/* i40e uses full duplex only */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	if (!wait_to_complete && !enable_lse)
 		update_link_reg(hw, &link);
@@ -3748,34 +3748,34 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_RSS_HASH;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -3834,7 +3834,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
 		/* For XL710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_40G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
 		dev_info->default_rxportconf.nb_queues = 2;
 		dev_info->default_txportconf.nb_queues = 2;
 		if (dev->data->nb_rx_queues == 1)
@@ -3848,17 +3848,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
 		/* For XXV710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_25G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
 		dev_info->default_rxportconf.ring_size = 256;
 		dev_info->default_txportconf.ring_size = 256;
 	} else {
 		/* For X710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
-		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
 			dev_info->default_rxportconf.ring_size = 512;
 			dev_info->default_txportconf.ring_size = 256;
 		} else {
@@ -3897,7 +3897,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
 	int ret;
 
 	if (qinq) {
-		if (vlan_type == ETH_VLAN_TYPE_OUTER)
+		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 			reg_id = 2;
 	}
 
@@ -3944,12 +3944,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
-	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -3963,12 +3963,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	/* 802.1ad frames ability is added in NVM API 1.7*/
 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
 		if (qinq) {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->first_tag = rte_cpu_to_le_16(tpid);
-			else if (vlan_type == ETH_VLAN_TYPE_INNER)
+			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		} else {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		}
 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
@@ -4027,37 +4027,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					   RTE_ETHER_TYPE_VLAN);
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					   RTE_ETHER_TYPE_VLAN);
 		}
 		else
 			i40e_vsi_config_double_vlan(vsi, FALSE);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
 		/* Enable or disable outer VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
@@ -4140,17 +4140,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	 /* Return current mode according to actual setting*/
 	switch (hw->fc.current_mode) {
 	case I40E_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case I40E_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case I40E_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case I40E_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	};
 
 	return 0;
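
This switch, and the reverse lookup array in i40e_flow_ctrl_set()
below, form the usual two-way mapping between enum rte_eth_fc_mode and
a driver's own flow-control enum. A sketch with a hypothetical
hw_fc_mode:

#include <rte_ethdev.h>

enum hw_fc_mode { HW_FC_NONE, HW_FC_RX_PAUSE, HW_FC_TX_PAUSE, HW_FC_FULL };

static enum rte_eth_fc_mode
hw_to_eth_fc(enum hw_fc_mode m)
{
	switch (m) {
	case HW_FC_FULL:
		return RTE_ETH_FC_FULL;
	case HW_FC_TX_PAUSE:
		return RTE_ETH_FC_TX_PAUSE;
	case HW_FC_RX_PAUSE:
		return RTE_ETH_FC_RX_PAUSE;
	default:
		return RTE_ETH_FC_NONE;
	}
}
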
@@ -4166,10 +4166,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	struct i40e_hw *hw;
 	struct i40e_pf *pf;
 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
-		[RTE_FC_NONE] = I40E_FC_NONE,
-		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
-		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
-		[RTE_FC_FULL] = I40E_FC_FULL
+		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
+		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+		[RTE_ETH_FC_FULL] = I40E_FC_FULL
 	};
 
 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
@@ -4316,7 +4316,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4469,7 +4469,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4512,7 +4512,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4847,7 +4847,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
 				hw->func_caps.num_vsis - vsi_count);
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-				ETH_64_POOLS);
+				RTE_ETH_64_POOLS);
 			if (pf->max_nb_vmdq_vsi) {
 				pf->flags |= I40E_FLAG_VMDQ;
 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -6132,10 +6132,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	int mask = 0;
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK |
-	       ETH_QINQ_STRIP_MASK |
-	       ETH_VLAN_FILTER_MASK |
-	       ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+	       RTE_ETH_QINQ_STRIP_MASK |
+	       RTE_ETH_VLAN_FILTER_MASK |
+	       RTE_ETH_VLAN_EXTEND_MASK;
 	ret = i40e_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6262,9 +6262,9 @@ i40e_pf_setup(struct i40e_pf *pf)
 
 	/* Configure filter control */
 	memset(&settings, 0, sizeof(settings));
-	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
-	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 	else {
 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -7117,7 +7117,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
 {
 	uint32_t vid_idx, vid_bit;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return 0;
 
 	vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7152,7 +7152,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
 	int ret;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return;
 
 	i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -8730,16 +8730,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8765,12 +8765,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8862,7 +8862,7 @@ int
 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->adapter->hw;
-	uint8_t lut[ETH_RSS_RETA_SIZE_512];
+	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 	uint32_t i;
 	int num;
 
@@ -8870,7 +8870,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 	 * configured. It's necessary to calculate the actual PF
 	 * queues that are configured.
 	 */
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		num = i40e_pf_calc_configured_queues_num(pf);
 	else
 		num = pf->dev_data->nb_rx_queues;
@@ -8949,7 +8949,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
-	    !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return 0;
 
 	hw = I40E_PF_TO_HW(pf);
@@ -10412,8 +10412,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
 		return I40E_ERR_NO_MEMORY;
 	}
 	switch (mirror_conf->rule_type) {
-	case ETH_MIRROR_VLAN:
-		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
+	case RTE_ETH_MIRROR_VLAN:
+		for (i = 0, j = 0; i < RTE_ETH_MIRROR_MAX_VLANS; i++) {
 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
 				mirr_rule->entries[j] =
 					mirror_conf->vlan.vlan_id[i];
@@ -10427,8 +10427,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
 		}
 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
 		break;
-	case ETH_MIRROR_VIRTUAL_POOL_UP:
-	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
+	case RTE_ETH_MIRROR_VIRTUAL_POOL_UP:
+	case RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN:
 		/* check if the specified pool bit is out of range */
 		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
 			PMD_DRV_LOG(ERR, "pool mask is out of range.");
@@ -10453,15 +10453,15 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
 		}
 		/* egress and ingress in aq commands means from switch but not port */
 		mirr_rule->rule_type =
-			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
+			(mirror_conf->rule_type == RTE_ETH_MIRROR_VIRTUAL_POOL_UP) ?
 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
 		break;
-	case ETH_MIRROR_UPLINK_PORT:
+	case RTE_ETH_MIRROR_UPLINK_PORT:
 		/* egress and ingress in aq commands means from switch but not port*/
 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
 		break;
-	case ETH_MIRROR_DOWNLINK_PORT:
+	case RTE_ETH_MIRROR_DOWNLINK_PORT:
 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
 		break;
 	default:
@@ -10603,16 +10603,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_25G:
 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
 		break;
@@ -10840,7 +10840,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
 	else
 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
 
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_cfg->pfc.willing = 0;
 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 		dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11348,7 +11348,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint16_t bsf, tc_mapping;
 	int i, j = 0;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
 	else
 		dcb_info->nb_tcs = 1;
@@ -11396,7 +11396,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
 		}
 		j++;
-	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
 	return 0;
 }
 
@@ -11774,10 +11774,10 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > I40E_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
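
A note that may help while reviewing these hunks: ethdev carries two
speed representations, and the code above converts between them.
RTE_ETH_LINK_SPEED_* are bitmap flags used in dev_conf.link_speeds and
dev_info.speed_capa, while RTE_ETH_SPEED_NUM_* are plain Mbps values
stored in rte_eth_link.link_speed. An illustrative conversion helper
(not part of the patch, speeds abridged):

#include <stdint.h>
#include <rte_ethdev.h>

static uint32_t
speed_flag_to_num(uint32_t flag)
{
	switch (flag) {
	case RTE_ETH_LINK_SPEED_100M:
	case RTE_ETH_LINK_SPEED_100M_HD:
		return RTE_ETH_SPEED_NUM_100M;
	case RTE_ETH_LINK_SPEED_1G:
		return RTE_ETH_SPEED_NUM_1G;
	case RTE_ETH_LINK_SPEED_10G:
		return RTE_ETH_SPEED_NUM_10G;
	case RTE_ETH_LINK_SPEED_40G:
		return RTE_ETH_SPEED_NUM_40G;
	default:
		return RTE_ETH_SPEED_NUM_UNKNOWN;
	}
}
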
 
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index cd6deabd60b3..f21c2de6bdb9 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -139,17 +139,17 @@ enum i40e_flxpld_layer_idx {
 		       I40E_FLAG_RSS_AQ_CAPABLE)
 
 #define I40E_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /* All bits of RSS hash enable for X722*/
 #define I40E_RSS_HENA_ALL_X722 ( \
@@ -1076,7 +1076,7 @@ struct i40e_rte_flow_rss_conf {
 	uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
 		     I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		    sizeof(uint32_t)];		/**< Hash key. */
-	uint16_t queue[ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
+	uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
 
 	bool symmetric_enable;		/**< true, if enable symmetric */
 	uint64_t config_pctypes;	/**< All PCTYPES with the flow  */
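
The queue[RTE_ETH_RSS_RETA_SIZE_512] member above sizes the
redirection table. For reference, a sketch of filling a RETA through
the public API, assuming the matching RTE_ETH_RETA_GROUP_SIZE rename
from this same series; the helper itself is illustrative:

#include <string.h>
#include <rte_ethdev.h>

static int
setup_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta, 0, sizeof(reta));
	/* Spread the reta_size slots round-robin over nb_queues queues. */
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta[idx].mask |= 1ULL << shift;
		reta[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
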
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 0cfe13b7b227..192e7234909f 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1077,7 +1077,7 @@ i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 	 * VLAN_STRIP by default. So reconfigure the vlan_offload
 	 * as it was done by the app earlier.
 	 */
-	err = i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+	err = i40evf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to set vlan_strip");
 
@@ -1403,28 +1403,28 @@ i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
 				pf_msg->event_data.link_event_adv.link_status;
 
 			switch (pf_msg->event_data.link_event_adv.link_speed) {
-			case ETH_SPEED_NUM_100M:
+			case RTE_ETH_SPEED_NUM_100M:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_100MB;
 				break;
-			case ETH_SPEED_NUM_1G:
+			case RTE_ETH_SPEED_NUM_1G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_1GB;
 				break;
-			case ETH_SPEED_NUM_2_5G:
+			case RTE_ETH_SPEED_NUM_2_5G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_2_5GB;
 				break;
-			case ETH_SPEED_NUM_5G:
+			case RTE_ETH_SPEED_NUM_5G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_5GB;
 				break;
-			case ETH_SPEED_NUM_10G:
+			case RTE_ETH_SPEED_NUM_10G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_10GB;
 				break;
-			case ETH_SPEED_NUM_20G:
+			case RTE_ETH_SPEED_NUM_20G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_20GB;
 				break;
-			case ETH_SPEED_NUM_25G:
+			case RTE_ETH_SPEED_NUM_25G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_25GB;
 				break;
-			case ETH_SPEED_NUM_40G:
+			case RTE_ETH_SPEED_NUM_40G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_40GB;
 				break;
 			default:
@@ -1770,7 +1770,7 @@ static int
 i40evf_init_vlan(struct rte_eth_dev *dev)
 {
 	/* Apply vlan offload setting */
-	i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+	i40evf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK);
 
 	return 0;
 }
@@ -1785,9 +1785,9 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40evf_enable_vlan_strip(dev);
 		else
 			i40evf_disable_vlan_strip(dev);
@@ -1933,7 +1933,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 	/**
 	 * Check if the jumbo frame and maximum packet length are set correctly
 	 */
-	if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
 		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1954,7 +1954,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 		}
 	}
 
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -2290,35 +2290,35 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
 	/* Linux driver PF host */
 	switch (vf->link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (vf->link_up)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			new_link.link_speed = ETH_SPEED_NUM_NONE;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 	/* full duplex only */
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-		!(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+		!(dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -2367,36 +2367,36 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	dev_info->tx_queue_offload_capa = 0;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -2596,10 +2596,10 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t i, idx, shift;
 	int ret;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_64) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_64) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number of hardware can "
-			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
+			"support (%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_64);
 		return -EINVAL;
 	}
 
@@ -2635,10 +2635,10 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	uint8_t *lut;
 	int ret;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_64) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_64) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number of hardware can "
-			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
+			"support (%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_64);
 		return -EINVAL;
 	}
 
@@ -2770,7 +2770,7 @@ i40evf_config_rss(struct i40e_vf *vf)
 	uint8_t *lut_info;
 	int ret;
 
-	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (vf->dev_data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		i40evf_disable_rss(vf);
 		PMD_DRV_LOG(DEBUG, "RSS not configured");
 		return 0;
@@ -2887,10 +2887,10 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > I40E_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
 	return ret;
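
[Not part of the diff] For readers following the rename: applications discover
these same capability bits through rte_eth_dev_info_get(). A minimal sketch
using the new names introduced by this patch (the helper name is illustrative):

	#include <rte_ethdev.h>

	/* Return 1 if the port advertises both VLAN stripping and CRC
	 * keeping on Rx, 0 if not, negative errno if the query fails.
	 */
	static int
	port_has_vlan_strip_and_keep_crc(uint16_t port_id)
	{
		struct rte_eth_dev_info dev_info;
		int ret;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		return (dev_info.rx_offload_capa &
				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) &&
		       (dev_info.rx_offload_capa &
				RTE_ETH_RX_OFFLOAD_KEEP_CRC);
	}
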
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3c1570bd9c47..d1cb992be61d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2015,7 +2015,7 @@ i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_VLAN_EXTEND;
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
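
[Not part of the diff] The check above keys off the Rx offloads the application
configured; the equivalent application-side test, assuming port_conf is the
struct rte_eth_conf that was passed to rte_eth_dev_configure(), is simply:

	/* QinQ (outer VLAN) handling is active when VLAN_EXTEND was set. */
	int qinq_on = !!(port_conf.rxmode.offloads &
			 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
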
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
index 1fb8c9abfcc6..3755d4d3fe2a 100644
--- a/drivers/net/i40e/i40e_hash.c
+++ b/drivers/net/i40e/i40e_hash.c
@@ -102,47 +102,47 @@ struct i40e_hash_map_rss_inset {
 
 const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
 	/* IPv4 */
-	{ ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
-	{ ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* IPv6 */
-	{ ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
-	{ ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* Port */
-	{ ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+	{ RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
 	/* Ether */
-	{ ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
-	{ ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+	{ RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+	{ RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
 
 	/* VLAN */
-	{ ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
-	{ ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+	{ RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+	{ RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
 };
 
 #define I40E_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
@@ -201,30 +201,30 @@ struct i40e_hash_match_pattern {
 #define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
 	pattern, rss_mask, true, cus_pctype }
 
-#define I40E_HASH_L2_RSS_MASK		(ETH_RSS_VLAN | ETH_RSS_ETH | \
-					ETH_RSS_L2_SRC_ONLY | \
-					ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK		(RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+					RTE_ETH_RSS_L2_SRC_ONLY | \
+					RTE_ETH_RSS_L2_DST_ONLY)
 
 #define I40E_HASH_L23_RSS_MASK		(I40E_HASH_L2_RSS_MASK | \
-					ETH_RSS_L3_SRC_ONLY | \
-					ETH_RSS_L3_DST_ONLY)
+					RTE_ETH_RSS_L3_SRC_ONLY | \
+					RTE_ETH_RSS_L3_DST_ONLY)
 
-#define I40E_HASH_IPV4_L23_RSS_MASK	(ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK	(ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK	(RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK	(RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
 
 #define I40E_HASH_L234_RSS_MASK		(I40E_HASH_L23_RSS_MASK | \
-					ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
-					ETH_RSS_L4_DST_ONLY)
+					RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+					RTE_ETH_RSS_L4_DST_ONLY)
 
-#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
 
-#define I40E_HASH_L4_TYPES		(ETH_RSS_NONFRAG_IPV4_TCP | \
-					ETH_RSS_NONFRAG_IPV4_UDP | \
-					ETH_RSS_NONFRAG_IPV4_SCTP | \
-					ETH_RSS_NONFRAG_IPV6_TCP | \
-					ETH_RSS_NONFRAG_IPV6_UDP | \
-					ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES		(RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* Current supported patterns and RSS types.
  * All items that have the same pattern types are together.
@@ -232,68 +232,68 @@ struct i40e_hash_match_pattern {
 static const struct i40e_hash_match_pattern match_patterns[] = {
 	/* Ether */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
-			      ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+			      RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
 			      I40E_FILTER_PCTYPE_L2_PAYLOAD),
 
 	/* IPv4 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV4),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_NONFRAG_IPV4_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
 			      I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
-			      ETH_RSS_NONFRAG_IPV4_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
-			      ETH_RSS_NONFRAG_IPV4_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_UDP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
-			      ETH_RSS_NONFRAG_IPV4_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
 
 	/* IPv6 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_NONFRAG_IPV6_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
 			      I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
-			      ETH_RSS_NONFRAG_IPV6_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_TCP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
-			      ETH_RSS_NONFRAG_IPV6_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_UDP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
-			      ETH_RSS_NONFRAG_IPV6_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
 
 	/* ESP */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
 
 	/* GTPC */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
@@ -308,27 +308,27 @@ static const struct i40e_hash_match_pattern match_patterns[] = {
 				  I40E_HASH_IPV4_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
 				  I40E_HASH_IPV6_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 
 	/* L2TPV3 */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
 
 	/* AH */
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV4),
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV6),
 };
 
@@ -564,29 +564,29 @@ i40e_hash_get_inset(uint64_t rss_types)
 	/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
 	 * it is the same case as none of them are added.
 	 */
-	mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
-	if (mask == ETH_RSS_L2_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
 		inset &= ~I40E_INSET_DMAC;
-	else if (mask == ETH_RSS_L2_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
 		inset &= ~I40E_INSET_SMAC;
 
-	mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
-	if (mask == ETH_RSS_L3_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
 		inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
-	else if (mask == ETH_RSS_L3_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
 		inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
 
-	mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-	if (mask == ETH_RSS_L4_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
 		inset &= ~I40E_INSET_DST_PORT;
-	else if (mask == ETH_RSS_L4_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
 		inset &= ~I40E_INSET_SRC_PORT;
 
 	if (rss_types & I40E_HASH_L4_TYPES) {
 		uint64_t l3_mask = rss_types &
-				   (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+				   (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 		uint64_t l4_mask = rss_types &
-				   (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+				   (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 		if (l3_mask && !l4_mask)
 			inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
@@ -825,7 +825,7 @@ i40e_hash_config(struct i40e_pf *pf,
 
 	/* Update lookup table */
 	if (rss_info->queue_num > 0) {
-		uint8_t lut[ETH_RSS_RETA_SIZE_512];
+		uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 		uint32_t i, j = 0;
 
 		for (i = 0; i < hw->func_caps.rss_table_size; i++) {
@@ -932,7 +932,7 @@ i40e_hash_parse_queues(const struct rte_eth_dev *dev,
 			    "RSS key is ignored when queues specified");
 
 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		max_queue = i40e_pf_calc_configured_queues_num(pf);
 	else
 		max_queue = pf->dev_data->nb_rx_queues;
@@ -1070,22 +1070,22 @@ i40e_hash_validate_rss_types(uint64_t rss_types)
 	uint64_t type, mask;
 
 	/* Validate L2 */
-	type = ETH_RSS_ETH & rss_types;
-	mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+	type = RTE_ETH_RSS_ETH & rss_types;
+	mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L3 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-	       ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
-	       ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
-	mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+	       RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+	       RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+	mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L4 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
-	mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+	mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
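
[Not part of the diff] The SRC_ONLY/DST_ONLY rules validated above can be
exercised from an application with rte_eth_dev_rss_hash_update(); a hedged
sketch (helper name illustrative):

	#include <rte_ethdev.h>

	/* Hash TCP/IPv4 flows on the source address only. Per the logic
	 * above, requesting both L3_SRC_ONLY and L3_DST_ONLY behaves as
	 * if neither were set, i.e. both addresses are hashed.
	 */
	static int
	rss_hash_ipv4_tcp_src_only(uint16_t port_id)
	{
		struct rte_eth_rss_conf rss_conf = {
			.rss_key = NULL, /* keep the programmed key */
			.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_L3_SRC_ONLY,
		};

		return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
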
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index e2d8b2b5f7f1..ccb3924a5f68 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1207,24 +1207,24 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 	event.event_data.link_event.link_status =
 		dev->data->dev_link.link_status;
 
-	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+	/* Convert RTE_ETH_SPEED_NUM_xxx into VIRTCHNL_LINK_SPEED_xxx */
 	switch (dev->data->dev_link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	default:
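
[Not part of the diff] The RTE_ETH_SPEED_NUM_* values stay plain Mb/s numbers,
which is why they can be switched on and printed directly; a sketch of the
reader side (helper name illustrative):

	#include <stdio.h>
	#include <rte_ethdev.h>

	/* Print negotiated link parameters without blocking. */
	static void
	print_link(uint16_t port_id)
	{
		struct rte_eth_link link;

		if (rte_eth_link_get_nowait(port_id, &link) != 0)
			return;

		if (link.link_status == RTE_ETH_LINK_UP)
			printf("port %u: %u Mbps %s-duplex\n", port_id,
			       link.link_speed,
			       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full" : "half");
		else
			printf("port %u: link down\n", port_id);
	}
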
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8329cbdd4e30..3bad4052ed1b 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1329,7 +1329,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	for (i = 0; i < tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		if (k) {
 			for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
 				for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
@@ -2005,7 +2005,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2265,7 +2265,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 	}
 	/* check simple tx conflict */
 	if (ad->tx_simple_allowed) {
-		if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+		if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
 				txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
 			PMD_DRV_LOG(ERR, "No-simple tx is required.");
 			return -EINVAL;
@@ -2925,7 +2925,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 	rxq->max_pkt_len =
 		RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
 			rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -3441,7 +3441,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		 txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
 	ad->tx_vec_allowed = (ad->tx_simple_allowed &&
 			txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
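
[Not part of the diff] The simple Tx path selected above tolerates only the
fast-free offload; an application opts in at configure time, under the
documented constraint that all transmitted mbufs come from a single mempool
and are not reference counted. Sketch (the capability check against
dev_info.tx_offload_capa is elided):

	#include <rte_ethdev.h>

	static int
	configure_fast_free(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_conf conf = {
			.txmode = {
				.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
			},
		};

		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	}
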
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 5ccf5773e857..303a4db47dbd 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -120,7 +120,7 @@ struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
-	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
 };
 
 struct i40e_tx_entry {
@@ -165,7 +165,7 @@ struct i40e_tx_queue {
 	bool q_set; /**< indicate if tx queue has been configured */
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
-	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 };
 
 /** Offload features */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index bd21d6422394..5f00d43950aa 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -899,7 +899,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index f52ed98d62d0..0192164c35fa 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -100,7 +100,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	  */
 	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < n; i++) {
 			free[i] = txep[i].mbuf;
 			txep[i].mbuf = NULL;
@@ -211,7 +211,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct i40e_rx_queue *rxq;
 	uint16_t desc, i;
 	bool first_queue;
@@ -221,11 +221,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	 /* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	/* no QinQ support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 
 	/**
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 0481b5538132..6d90b0f3511b 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -42,30 +42,30 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
 		sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -385,19 +385,19 @@ i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
 		return -EINVAL;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			return i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_STRIP)
+		    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			return i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_stripping(vsi, FALSE);
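
[Not part of the diff] The *_MASK bits handled above reach the PMD via
rte_eth_dev_set_vlan_offload(); a read-modify-write keeps the unrelated VLAN
offloads untouched. Sketch (helper name illustrative):

	#include <rte_ethdev.h>

	/* Enable VLAN stripping, leaving filter/extend/QinQ state alone. */
	static int
	enable_vlan_strip(uint16_t port_id)
	{
		int mask = rte_eth_dev_get_vlan_offload(port_id);

		if (mask < 0)
			return mask;

		return rte_eth_dev_set_vlan_offload(port_id,
				mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
	}
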
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index b3bd07811198..1d4383e89327 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -48,18 +48,18 @@
 	VIRTCHNL_VF_OFFLOAD_RX_POLLING)
 
 #define IAVF_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |         \
-	ETH_RSS_NONFRAG_IPV4_TCP |  \
-	ETH_RSS_NONFRAG_IPV4_UDP |  \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |         \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
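
[Not part of the diff] IAVF_RSS_OFFLOAD_ALL above is the superset the VF
advertises; an application typically requests the group macros at configure
time and masks them against dev_info.flow_type_rss_offloads first. A sketch
with the renamed names:

	#include <rte_ethdev.h>

	/* Request RSS over IP/TCP/UDP/SCTP traffic; trim rss_hf with
	 * dev_info.flow_type_rss_offloads before use on real hardware.
	 */
	static const struct rte_eth_conf rss_port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL, /* PMD default key */
				.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
					  RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP,
			},
		},
	};
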
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 574cfe055e7c..94f6f4704b9c 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -265,53 +265,53 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	static const uint64_t map_hena_rss[] = {
 		/* IPv4 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
-				ETH_RSS_NONFRAG_IPV4_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
-				ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
 
 		/* IPv6 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
-				ETH_RSS_NONFRAG_IPV6_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
-				ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
 
 		/* L2 Payload */
-		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
 	};
 
-	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
-				  ETH_RSS_NONFRAG_IPV4_TCP |
-				  ETH_RSS_NONFRAG_IPV4_SCTP |
-				  ETH_RSS_NONFRAG_IPV4_OTHER |
-				  ETH_RSS_FRAG_IPV4;
+	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV4;
 
-	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_NONFRAG_IPV6_SCTP |
-				  ETH_RSS_NONFRAG_IPV6_OTHER |
-				  ETH_RSS_FRAG_IPV6;
+	const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV6;
 
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
@@ -330,13 +330,13 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	/**
-	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
 	 * generalizations of all other IPv4 and IPv6 RSS types.
 	 */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		rss_hf |= ipv4_rss;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		rss_hf |= ipv6_rss;
 
 	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
@@ -362,10 +362,10 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	if (valid_rss_hf & ipv4_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
 
 	if (valid_rss_hf & ipv6_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
 
 	if (rss_hf & ~valid_rss_hf)
 		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
@@ -466,7 +466,7 @@ iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
 		return 0;
 
 	enable = !!(dev->data->dev_conf.txmode.offloads &
-		    DEV_TX_OFFLOAD_VLAN_INSERT);
+		    RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	iavf_config_vlan_insert_v2(adapter, enable);
 
 	return 0;
@@ -478,10 +478,10 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
 	int err;
 
 	err = iavf_dev_vlan_offload_set(dev,
-					ETH_VLAN_STRIP_MASK |
-					ETH_QINQ_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK |
-					ETH_VLAN_EXTEND_MASK);
+					RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_QINQ_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK |
+					RTE_ETH_VLAN_EXTEND_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
 		return err;
@@ -511,8 +511,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_vec_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Large VF setting */
 	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
@@ -585,7 +585,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	/* Check if the jumbo frame and maximum packet length are set
 	 * correctly.
 	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
 		    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -608,7 +608,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -943,35 +943,35 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
@@ -1031,42 +1031,42 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (vf->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -1214,14 +1214,14 @@ iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
 	bool enable;
 	int err;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 
 		iavf_iterate_vlan_filters_v2(dev, enable);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		err = iavf_config_vlan_strip_v2(adapter, enable);
 		/* If not support, the stripping is already disabled by PF */
@@ -1250,9 +1250,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			err = iavf_enable_vlan_strip(adapter);
 		else
 			err = iavf_disable_vlan_strip(adapter);
@@ -1457,10 +1457,10 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > IAVF_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_JUMBO_FRAME;
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -1564,7 +1564,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
 		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
-					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
 					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
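
[Not part of the diff] The strict reta_size == RTE_ETH_RSS_RETA_SIZE_64 check
above shapes the application call; a sketch that spreads a 64-entry table
round-robin over nb_queues > 0 queues (assuming RTE_ETH_RETA_GROUP_SIZE is
the renamed RTE_RETA_GROUP_SIZE):

	#include <stdint.h>
	#include <string.h>
	#include <rte_ethdev.h>

	static int
	spread_reta_64(uint16_t port_id, uint16_t nb_queues)
	{
		struct rte_eth_rss_reta_entry64
			reta[RTE_ETH_RSS_RETA_SIZE_64 / RTE_ETH_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta, 0, sizeof(reta));
		for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_64; i++) {
			uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

			/* Select this entry and map it to a queue. */
			reta[idx].mask |= UINT64_C(1) << shift;
			reta[idx].reta[shift] = i % nb_queues;
		}

		return rte_eth_dev_rss_reta_update(port_id, reta,
						   RTE_ETH_RSS_RETA_SIZE_64);
	}
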
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 2b03dad8589c..1329a389f742 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -341,83 +341,83 @@ struct virtchnl_proto_hdrs ipv4_ecpri_tmplt = {
 /* rss type super set */
 
 /* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4)
+#define IAVF_RSS_TYPE_OUTER_IPV4	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define IAVF_RSS_TYPE_OUTER_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 #define IAVF_RSS_TYPE_OUTER_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 #define IAVF_RSS_TYPE_OUTER_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6	(ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 #define IAVF_RSS_TYPE_OUTER_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 #define IAVF_RSS_TYPE_OUTER_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* VLAN IPV4 */
 #define IAVF_RSS_TYPE_VLAN_IPV4		(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define IAVF_RSS_TYPE_VLAN_IPV6		(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4	ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4	RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6	ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6	RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* GTPU IPv4 */
 #define IAVF_RSS_TYPE_GTPU_IPV4		(IAVF_RSS_TYPE_INNER_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_UDP	(IAVF_RSS_TYPE_INNER_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_TCP	(IAVF_RSS_TYPE_INNER_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define IAVF_RSS_TYPE_GTPU_IPV6		(IAVF_RSS_TYPE_INNER_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_UDP	(IAVF_RSS_TYPE_INNER_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_TCP	(IAVF_RSS_TYPE_INNER_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /**
  * Supported pattern for hash.
@@ -435,7 +435,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv4_udp,		IAVF_RSS_TYPE_VLAN_IPV4_UDP,	&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_tcp,		IAVF_RSS_TYPE_VLAN_IPV4_TCP,	&outer_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_sctp,		IAVF_RSS_TYPE_VLAN_IPV4_SCTP,	&outer_ipv4_sctp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpu,			ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gtpu,			RTE_ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&inner_ipv4_tcp_tmplt},
@@ -477,9 +477,9 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv4_ah,			IAVF_RSS_TYPE_IPV4_AH,		&ipv4_ah_tmplt},
 	{iavf_pattern_eth_ipv4_l2tpv3,			IAVF_RSS_TYPE_IPV4_L2TPV3,	&ipv4_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv4_pfcp,			IAVF_RSS_TYPE_IPV4_PFCP,	&ipv4_pfcp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpc,			ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
-	{iavf_pattern_eth_ecpri,			ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
-	{iavf_pattern_eth_ipv4_ecpri,			ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_gtpc,			RTE_ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ecpri,			RTE_ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_ecpri,			RTE_ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -497,7 +497,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv6_udp,		IAVF_RSS_TYPE_VLAN_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_tcp,		IAVF_RSS_TYPE_VLAN_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_sctp,		IAVF_RSS_TYPE_VLAN_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpu,			ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gtpu,			RTE_ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&inner_ipv6_tcp_tmplt},
@@ -539,7 +539,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_ah,			IAVF_RSS_TYPE_IPV6_AH,		&ipv6_ah_tmplt},
 	{iavf_pattern_eth_ipv6_l2tpv3,			IAVF_RSS_TYPE_IPV6_L2TPV3,	&ipv6_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv6_pfcp,			IAVF_RSS_TYPE_IPV6_PFCP,	&ipv6_pfcp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpc,			ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ipv6_gtpc,			RTE_ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -573,57 +573,57 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 	struct virtchnl_rss_cfg rss_cfg;
 
 #define IAVF_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		rss_cfg.proto_hdrs = inner_ipv4_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		rss_cfg.proto_hdrs = inner_ipv6_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV4) {
 		struct virtchnl_proto_hdrs hdr = {
 			.tunnel_level = TUNNEL_LEVEL_OUTER,
 			.count = 3,
@@ -641,7 +641,7 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV6) {
 		struct virtchnl_proto_hdrs hdr = {
 			.tunnel_level = TUNNEL_LEVEL_OUTER,
 			.count = 3,
@@ -804,28 +804,28 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 		hdr = &proto_hdrs->proto_hdr[i];
 		switch (hdr->type) {
 		case VIRTCHNL_PROTO_HDR_ETH:
-			if (!(rss_type & ETH_RSS_ETH))
+			if (!(rss_type & RTE_ETH_RSS_ETH))
 				hdr->field_selector = 0;
-			else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_DST);
-			else if (rss_type & ETH_RSS_L2_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_SRC);
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4) {
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 					iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
-				} else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				}
@@ -835,11 +835,11 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4)
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
 					REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
 			} else {
 				hdr->field_selector = 0;
@@ -847,17 +847,17 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6:
 			if (rss_type &
-			    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV6_TCP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			    (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				}
@@ -874,7 +874,7 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
 			else
 				hdr->field_selector = 0;
@@ -882,15 +882,15 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_UDP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
@@ -898,15 +898,15 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_TCP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
@@ -914,46 +914,46 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_SCTP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_SCTP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_S_VLAN:
-			if (!(rss_type & ETH_RSS_S_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_S_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_C_VLAN:
-			if (!(rss_type & ETH_RSS_C_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_C_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_L2TPV3:
-			if (!(rss_type & ETH_RSS_L2TPV3))
+			if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ESP:
-			if (!(rss_type & ETH_RSS_ESP))
+			if (!(rss_type & RTE_ETH_RSS_ESP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_AH:
-			if (!(rss_type & ETH_RSS_AH))
+			if (!(rss_type & RTE_ETH_RSS_AH))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_PFCP:
-			if (!(rss_type & ETH_RSS_PFCP))
+			if (!(rss_type & RTE_ETH_RSS_PFCP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ECPRI:
-			if (!(rss_type & ETH_RSS_ECPRI))
+			if (!(rss_type & RTE_ETH_RSS_ECPRI))
 				hdr->field_selector = 0;
 			break;
 		default:
@@ -970,7 +970,7 @@ iavf_refine_proto_hdrs_gtpu(struct virtchnl_proto_hdrs *proto_hdrs,
 	struct virtchnl_proto_hdr *hdr;
 	int i;
 
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	for (i = 0; i < proto_hdrs->count; i++) {
@@ -1067,10 +1067,10 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -1081,27 +1081,27 @@ struct rss_attr_type {
 	uint64_t type;
 };
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE64)
 
 #define INVALID_RSS_ATTR	(RTE_ETH_RSS_L3_PRE32	| \
@@ -1111,9 +1111,9 @@ struct rss_attr_type {
 				 RTE_ETH_RSS_L3_PRE96)
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* current IPv6 prefix only supports 64 bits */
 	{RTE_ETH_RSS_L3_PRE64,				VALID_RSS_IPV6},
 	{INVALID_RSS_ATTR,				0}
@@ -1130,15 +1130,15 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index e33fe4576b6e..4ff856fc82aa 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -609,7 +609,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->vsi = vsi;
 	rxq->offloads = offloads;
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d633..096be81e8a69 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -24,22 +24,22 @@
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_QINQ_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		DEV_RX_OFFLOAD_CHECKSUM |		 \
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_RX_OFFLOAD_VLAN |		 \
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_VLAN |		 \
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
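
These masks gate the vectorized Rx/Tx paths. A simplified sketch of how they
are consumed (the actual selection lives in the iavf vector-path code; the
helper name here is illustrative):

#include <stdint.h>

/* Pick a Tx path from the queue's offload set: offloads the vector
 * routines cannot handle force the scalar path, handled offloads select
 * the offload-capable vector path.
 */
static int
iavf_select_tx_path(uint64_t offloads)
{
	if (offloads & IAVF_TX_NO_VECTOR_FLAGS)
		return -1; /* scalar path only */
	if (offloads & IAVF_TX_VECTOR_OFFLOAD)
		return IAVF_VECTOR_OFFLOAD_PATH;
	return IAVF_VECTOR_PATH;
}
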
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 475070e036ef..8f9a397e4143 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -904,7 +904,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH ||
+				RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
@@ -957,7 +957,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					raw_desc_bh1, 1);
 
 			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 571161c0cdec..2329928c62cb 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1138,7 +1138,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-			    DEV_RX_OFFLOAD_RSS_HASH ||
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1191,7 +1191,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 						 raw_desc_bh1, 1);
 
 				if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-						DEV_RX_OFFLOAD_RSS_HASH) {
+						RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 					/**
 					 * to shift the 32b RSS hash value to the
 					 * highest 32b of each 128b before mask
@@ -1719,7 +1719,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
 								rte_lcore_id());
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e9055259b..58f928bdd7ca 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -818,7 +818,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 4c2e0c7216fd..ec53478083b4 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -807,7 +807,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
 		/* set all lut items to default queue */
 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index cab7c4da8759..6226aa5a80c2 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -66,7 +66,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	/* Check if the jumbo frame and maximum packet length are set
 	 * correctly.
 	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (max_pkt_len <= ICE_ETH_MAX_LEN ||
 		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -89,7 +89,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -559,7 +559,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -620,7 +620,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 
 	return 0;
@@ -635,8 +635,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	return 0;
 }
@@ -658,28 +658,28 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -896,42 +896,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (hw->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = hw->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -950,11 +950,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
 					udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
 					udp_tunnel->udp_port);
 		break;
@@ -981,8 +981,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
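
The link-speed translation in ice_dcf_link_update() above follows the common
PMD pattern; a condensed sketch with the renamed constants (the helper name
is illustrative):

#include <stdint.h>
#include <rte_ethdev.h>

/* Map a raw link speed in Mbps to RTE_ETH_SPEED_NUM_*; values the switch
 * does not list fall back to RTE_ETH_SPEED_NUM_NONE.
 */
static uint32_t
mbps_to_rte_speed(uint32_t mbps)
{
	switch (mbps) {
	case 10:     return RTE_ETH_SPEED_NUM_10M;
	case 100:    return RTE_ETH_SPEED_NUM_100M;
	case 1000:   return RTE_ETH_SPEED_NUM_1G;
	case 10000:  return RTE_ETH_SPEED_NUM_10G;
	case 20000:  return RTE_ETH_SPEED_NUM_20G;
	case 25000:  return RTE_ETH_SPEED_NUM_25G;
	case 40000:  return RTE_ETH_SPEED_NUM_40G;
	case 50000:  return RTE_ETH_SPEED_NUM_50G;
	case 100000: return RTE_ETH_SPEED_NUM_100G;
	default:     return RTE_ETH_SPEED_NUM_NONE;
	}
}
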
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 970461f3e90a..0dac1b92bfdb 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -37,7 +37,7 @@ ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -45,7 +45,7 @@ ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -135,29 +135,29 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -239,9 +239,9 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		bool enable = !!(dev_conf->rxmode.offloads &
-				 DEV_RX_OFFLOAD_VLAN_STRIP);
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (enable && repr->outer_vlan_info.port_vlan_ena) {
 			PMD_DRV_LOG(ERR,
@@ -338,7 +338,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 	if (!ice_dcf_vlan_offload_ena(repr))
 		return -ENOTSUP;
 
-	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer VLAN in QinQ\n");
 		return -EINVAL;
@@ -368,7 +368,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 
 	if (repr->outer_vlan_info.stripping_ena) {
 		err = ice_dcf_vf_repr_vlan_offload_set(dev,
-						       ETH_VLAN_STRIP_MASK);
+						       RTE_ETH_VLAN_STRIP_MASK);
 		if (err) {
 			PMD_DRV_LOG(ERR,
 				    "Failed to reset VLAN stripping : %d\n",
@@ -441,7 +441,7 @@ ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
 	int err;
 
 	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
-					       ETH_VLAN_STRIP_MASK);
+					       RTE_ETH_VLAN_STRIP_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
 		return err;
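
From the application side, the renamed VLAN flags are exercised through the
standard ethdev calls; a hedged sketch (the function name is illustrative):

#include <rte_ethdev.h>

/* Enable VLAN stripping on a port. rte_eth_dev_set_vlan_offload()
 * derives the changed-bits mask (RTE_ETH_VLAN_STRIP_MASK) and invokes
 * the PMD's vlan_offload_set callback, such as the one shown above.
 */
static int
enable_vlan_strip(uint16_t port_id)
{
	int flags = rte_eth_dev_get_vlan_offload(port_id);

	if (flags < 0)
		return flags;
	return rte_eth_dev_set_vlan_offload(port_id,
			flags | RTE_ETH_VLAN_STRIP_OFFLOAD);
}
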
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a4cd39c954f1..d79cc549da19 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1449,9 +1449,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	TAILQ_INIT(&vsi->mac_list);
 	TAILQ_INIT(&vsi->vlan_list);
 
-	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+	/* Keep in sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
 			hw->func_caps.common_cap.rss_table_size;
 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -2809,16 +2809,16 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	int ret;
 
 #define ICE_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_FRAG_IPV6)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV6)
 
 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
 	if (ret)
@@ -2828,7 +2828,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	cfg.symm = 0;
 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 	/* Configure RSS for IPv4 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2838,7 +2838,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for IPv6 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2848,7 +2848,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -2859,7 +2859,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -2870,7 +2870,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -2881,7 +2881,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -2892,7 +2892,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -2903,7 +2903,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -2913,7 +2913,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -2923,7 +2923,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -2933,7 +2933,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -2943,7 +2943,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -2953,7 +2953,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -2963,7 +2963,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -2973,7 +2973,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_FRAG;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2982,7 +2982,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_FRAG;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3124,8 +3124,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_rx_queues) {
 		ret = ice_init_rss(pf);
@@ -3344,8 +3344,8 @@ ice_dev_start(struct rte_eth_dev *dev)
 	ice_set_rx_function(dev);
 	ice_set_tx_function(dev);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = ice_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3449,40 +3449,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->flow_type_rss_offloads = 0;
 
 	if (!is_safe_mode) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM |
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_QINQ_STRIP |
-			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_QINQ_INSERT |
-			DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM |
-			DEV_TX_OFFLOAD_SCTP_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
 	dev_info->rx_queue_offload_capa = 0;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3521,24 +3521,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_align = ICE_ALIGN_RING_DESC,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			       ETH_LINK_SPEED_100M |
-			       ETH_LINK_SPEED_1G |
-			       ETH_LINK_SPEED_2_5G |
-			       ETH_LINK_SPEED_5G |
-			       ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_20G |
-			       ETH_LINK_SPEED_25G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			       RTE_ETH_LINK_SPEED_100M |
+			       RTE_ETH_LINK_SPEED_1G |
+			       RTE_ETH_LINK_SPEED_2_5G |
+			       RTE_ETH_LINK_SPEED_5G |
+			       RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_20G |
+			       RTE_ETH_LINK_SPEED_25G;
 
 	phy_type_low = hw->port_info->phy.phy_type_low;
 	phy_type_high = hw->port_info->phy.phy_type_high;
 
 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3603,8 +3603,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
 					      &link_status, NULL);
 		if (status != ICE_SUCCESS) {
-			link.link_speed = ETH_SPEED_NUM_100M;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_100M;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			goto out;
 		}
@@ -3620,55 +3620,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		goto out;
 
 	/* Full-duplex operation at all supported speeds */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case ICE_AQ_LINK_SPEED_10MB:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case ICE_AQ_LINK_SPEED_100MB:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case ICE_AQ_LINK_SPEED_1000MB:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case ICE_AQ_LINK_SPEED_2500MB:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_5GB:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_10GB:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case ICE_AQ_LINK_SPEED_20GB:
-		link.link_speed = ETH_SPEED_NUM_20G;
+		link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case ICE_AQ_LINK_SPEED_25GB:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case ICE_AQ_LINK_SPEED_40GB:
-		link.link_speed = ETH_SPEED_NUM_40G;
+		link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case ICE_AQ_LINK_SPEED_50GB:
-		link.link_speed = ETH_SPEED_NUM_50G;
+		link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case ICE_AQ_LINK_SPEED_100GB:
-		link.link_speed = ETH_SPEED_NUM_100G;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case ICE_AQ_LINK_SPEED_UNKNOWN:
 		PMD_DRV_LOG(ERR, "Unknown link speed");
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "None link speed");
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 
 out:
 	ice_atomic_write_link_status(dev, &link);
@@ -3767,10 +3767,10 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > ICE_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -4161,15 +4161,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ice_vsi_config_vlan_filter(vsi, true);
 		else
 			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ice_vsi_config_vlan_stripping(vsi, true);
 		else
 			ice_vsi_config_vlan_stripping(vsi, false);
@@ -5244,7 +5244,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
 		break;
 	default:
@@ -5268,7 +5268,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
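
For completeness, the tunnel-port paths above are reached from an application
as follows (sketch; the helper name is illustrative, and this driver accepts
only VXLAN here):

#include <rte_ethdev.h>

/* Register a VXLAN UDP port; the request is dispatched to the PMD's
 * udp_tunnel_port_add callback, ice_dev_udp_tunnel_port_add() above.
 */
static int
add_vxlan_udp_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = udp_port,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
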
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index b4bf651c1c7f..1c4bc4e30349 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -115,19 +115,19 @@
 		       ICE_FLAG_VF_MAC_BY_PF)
 
 #define ICE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /**
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 54d14dfcddfb..beb863f70568 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -39,27 +39,27 @@
 #define ICE_IPV4_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
 #define ICE_IPV6_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE32	| \
 				 RTE_ETH_RSS_L3_PRE48	| \
 				 RTE_ETH_RSS_L3_PRE64)
@@ -373,80 +373,80 @@ struct ice_rss_hash_cfg eth_tmplt = {
 };
 
 /* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4		(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4)
+#define ICE_RSS_TYPE_ETH_IPV4		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_ETH_IPV4_UDP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 #define ICE_RSS_TYPE_ETH_IPV4_TCP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 #define ICE_RSS_TYPE_ETH_IPV4_SCTP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
-#define ICE_RSS_TYPE_IPV4		ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
+#define ICE_RSS_TYPE_IPV4		RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 /* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6		(ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(ETH_RSS_ETH | ETH_RSS_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_ETH_IPV6_UDP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 #define ICE_RSS_TYPE_ETH_IPV6_TCP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 #define ICE_RSS_TYPE_ETH_IPV6_SCTP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
-#define ICE_RSS_TYPE_IPV6		ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ICE_RSS_TYPE_IPV6		RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* VLAN IPV4 */
 #define ICE_RSS_TYPE_VLAN_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV4)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_VLAN_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_SCTP	(ICE_RSS_TYPE_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define ICE_RSS_TYPE_VLAN_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_FRAG	(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_VLAN_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_SCTP	(ICE_RSS_TYPE_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 
 /* GTPU IPv4 */
 #define ICE_RSS_TYPE_GTPU_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define ICE_RSS_TYPE_GTPU_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 
 /* PPPOE */
-#define ICE_RSS_TYPE_PPPOE		(ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
 
 /* PPPOE IPv4 */
 #define ICE_RSS_TYPE_PPPOE_IPV4		(ICE_RSS_TYPE_IPV4 | \
@@ -465,17 +465,17 @@ struct ice_rss_hash_cfg eth_tmplt = {
 					 ICE_RSS_TYPE_PPPOE)
 
 /* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /* MAC */
-#define ICE_RSS_TYPE_ETH		ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH		RTE_ETH_RSS_ETH
 
 /**
  * Supported pattern for hash.
@@ -640,51 +640,51 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
-		if (!(rss_type & ETH_RSS_ETH))
+		if (!(rss_type & RTE_ETH_RSS_ETH))
 			*hash_flds &= ~ICE_FLOW_HASH_ETH;
-		if (rss_type & ETH_RSS_L2_SRC_ONLY)
+		if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
-		else if (rss_type & ETH_RSS_L2_DST_ONLY)
+		else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
 		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
-		if (rss_type & ETH_RSS_ETH)
+		if (rss_type & RTE_ETH_RSS_ETH)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
-		if (rss_type & ETH_RSS_C_VLAN)
+		if (rss_type & RTE_ETH_RSS_C_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
-		else if (rss_type & ETH_RSS_S_VLAN)
+		else if (rss_type & RTE_ETH_RSS_S_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
-		if (!(rss_type & ETH_RSS_PPPOE))
+		if (!(rss_type & RTE_ETH_RSS_PPPOE))
 			*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 		if (rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		    ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV4) {
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 				*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
 				*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 			}
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV4;
@@ -693,30 +693,30 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 		if (rss_type &
-		   (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+		   (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		}
 
 		if (rss_type & RTE_ETH_RSS_L3_PRE32) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
 			} else {
@@ -725,10 +725,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE48) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
 			} else {
@@ -737,10 +737,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE64) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
 			} else {
@@ -752,15 +752,15 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV6_UDP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
@@ -769,15 +769,15 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV6_TCP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
@@ -786,15 +786,15 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_SCTP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
@@ -802,22 +802,22 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
-		if (!(rss_type & ETH_RSS_L2TPV3))
+		if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 			*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
-		if (!(rss_type & ETH_RSS_ESP))
+		if (!(rss_type & RTE_ETH_RSS_ESP))
 			*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
-		if (!(rss_type & ETH_RSS_AH))
+		if (!(rss_type & RTE_ETH_RSS_AH))
 			*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
-		if (!(rss_type & ETH_RSS_PFCP))
+		if (!(rss_type & RTE_ETH_RSS_PFCP))
 			*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
 	}
 }
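
For reference, the application-facing side of these renamed RSS flags looks roughly like this -- a minimal sketch (function name, port id and queue counts are illustrative, not part of this patch):

#include <rte_ethdev.h>

/* Minimal sketch: request RSS on non-fragmented IPv4/IPv6 TCP and UDP
 * traffic using the RTE_ETH-prefixed names. configure_rss() and its
 * parameters are illustrative only.
 */
static int
configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			.rss_key = NULL, /* keep the PMD default key */
			.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV6_UDP,
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
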
@@ -851,7 +851,7 @@ ice_refine_hash_cfg_gtpu(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
@@ -873,10 +873,10 @@ static void ice_refine_hash_cfg(struct ice_rss_hash_cfg *hash_cfg,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -888,9 +888,9 @@ struct rss_attr_type {
 };
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* current IPv6 prefix support is limited to 64 bits */
 	{RTE_ETH_RSS_L3_PRE32,				VALID_RSS_IPV6},
 	{RTE_ETH_RSS_L3_PRE48,				VALID_RSS_IPV6},
@@ -909,16 +909,16 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f047ee..63c07e001f07 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -280,7 +280,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 				   ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
 				   dev_data->dev_conf.rxmode.max_rx_pkt_len);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
 		    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -1103,7 +1103,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2780,7 +2780,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
 	for (i = 0; i < txq->tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
 			txep->mbuf = NULL;
@@ -3254,7 +3254,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		(txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
 
 	if (ad->tx_simple_allowed)
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 9725ac018043..8c870354619e 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -473,7 +473,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			 * will cause a performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 5bba9887d296..6d2038975830 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -584,7 +584,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			 * will cause a performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -994,7 +994,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 2d8ef7dc8a93..a5b573c22da2 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -248,23 +248,23 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 }
 
 #define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define ICE_RX_VECTOR_OFFLOAD (				\
-		DEV_RX_OFFLOAD_CHECKSUM |		\
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_RX_OFFLOAD_VLAN |			\
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_RX_OFFLOAD_VLAN |			\
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define ICE_VECTOR_PATH		0
 #define ICE_VECTOR_OFFLOAD_PATH	1
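
The two masks above drive the Tx path choice; a hedged sketch of that selection logic follows (the helper name is illustrative and assumes the masks defined above are in scope):

#include <stdint.h>

/* Sketch of the path selection the masks above enable: any offload in the
 * "no vector" set forces the scalar path; offloads in the vector-offload
 * set pick the offload-capable vector path. select_ice_tx_path() is an
 * illustrative helper, not the driver's actual code.
 */
static inline int
select_ice_tx_path(uint64_t offloads)
{
	if (offloads & ICE_TX_NO_VECTOR_FLAGS)
		return -1; /* fall back to the scalar Tx routine */
	if (offloads & ICE_TX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;
	return ICE_VECTOR_PATH;
}
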
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 653bd28b417c..117494131f32 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -479,7 +479,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * will cause a performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 224a0954836b..d8f5a786efac 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -314,8 +314,8 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		rx_mq_mode != ETH_MQ_RX_RSS) {
+	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 		/* RSS together with VMDq is not supported */
 		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				rx_mq_mode);
@@ -325,7 +325,7 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 	/* To not break software that sets an invalid mode, only display
 	 * a warning if an invalid mode is used.
 	 */
-	if (tx_mq_mode != ETH_MQ_TX_NONE)
+	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
 		PMD_INIT_LOG(WARNING,
 			"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
 			tx_mq_mode);
@@ -341,8 +341,8 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	ret  = igc_check_mq_mode(dev);
 	if (ret != 0)
@@ -480,12 +480,12 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 		if (speed == SPEED_2500) {
 			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
@@ -497,9 +497,9 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		}
 	} else {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
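
On the consumer side, these renamed link constants surface through rte_eth_link_get_nowait(); a minimal sketch (print_link() is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

/* Minimal sketch: read and print link state with the RTE_ETH_LINK_*
 * names on an already-started port.
 */
static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: up, %u Mbps, %s\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
	else
		printf("port %u: down\n", port_id);
}
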
@@ -532,7 +532,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
 				" Port %d: Link Up - speed %u Mbps - %s",
 				dev->data->port_id,
 				(unsigned int)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				"full-duplex" : "half-duplex");
 		else
 			PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -979,18 +979,18 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	/* VLAN Offload Settings */
 	eth_igc_vlan_offload_set(dev,
-		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+		RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
 		hw->mac.autoneg = 1;
 	} else {
 		int num_speeds = 0;
 
-		if (*speeds & ETH_LINK_SPEED_FIXED) {
+		if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
 			PMD_DRV_LOG(ERR,
 				    "Force speed mode currently not supported");
 			igc_dev_clear_queues(dev);
@@ -1000,33 +1000,33 @@ eth_igc_start(struct rte_eth_dev *dev)
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.autoneg = 1;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_2_5G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
 			num_speeds++;
 		}
@@ -1490,14 +1490,14 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
-	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_vmdq_pools = 0;
 
 	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1523,9 +1523,9 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -1603,11 +1603,11 @@ eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (mtu > RTE_ETHER_MTU) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= IGC_RCTL_LPE;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~IGC_RCTL_LPE;
 	}
 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
@@ -2165,13 +2165,13 @@ eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
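
For context, the renamed RTE_ETH_FC_* modes reach the PMD through the generic flow-control API; a minimal sketch that preserves the other fields by reading the current configuration first (enable_full_fc() is illustrative):

#include <rte_ethdev.h>

/* Minimal sketch: switch a port to full (Rx+Tx) flow control using the
 * namespaced mode names.
 */
static int
enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
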
@@ -2203,16 +2203,16 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->fc.requested_mode = igc_fc_none;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->fc.requested_mode = igc_fc_rx_pause;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->fc.requested_mode = igc_fc_tx_pause;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->fc.requested_mode = igc_fc_full;
 		break;
 	default:
@@ -2258,17 +2258,17 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* set redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta, reg;
 		uint16_t idx, shift;
 		uint8_t j, mask;
@@ -2314,17 +2314,17 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* read redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta;
 		uint16_t idx, shift;
 		uint8_t j, mask;
@@ -2393,23 +2393,23 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_hf = 0;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 
 	rss_conf->rss_hf |= rss_hf;
 	return 0;
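
A hedged sketch of the application side of the RTE_ETH_RSS_RETA_SIZE_128 table these functions validate; RTE_ETH_RETA_GROUP_SIZE is assumed to be the namespaced 64-entry group width, and spread_reta() is illustrative:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Minimal sketch: spread a 128-entry redirection table round-robin
 * across nb_rxq queues.
 */
static int
spread_reta(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64
		reta[RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t grp = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[grp].mask |= UINT64_C(1) << pos;
		reta[grp].reta[pos] = i % nb_rxq;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta,
					   RTE_ETH_RSS_RETA_SIZE_128);
}
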
@@ -2495,7 +2495,7 @@ igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 		return 0;
 
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0)
 		goto write_ext_vlan;
 
 	/* Update maximum packet length */
@@ -2528,7 +2528,7 @@ igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 		return 0;
 
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0)
 		goto write_ext_vlan;
 
 	/* Update maximum packet length */
@@ -2554,22 +2554,22 @@ eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igc_vlan_hw_strip_enable(dev);
 		else
 			igc_vlan_hw_strip_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igc_vlan_hw_filter_enable(dev);
 		else
 			igc_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			return igc_vlan_hw_extend_enable(dev);
 		else
 			return igc_vlan_hw_extend_disable(dev);
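
The mask bits checked above are the ones applications pass through rte_eth_dev_set_vlan_offload(); a minimal sketch using the renamed names (the RTE_ETH_VLAN_*_OFFLOAD constants are assumed to be the namespaced counterparts of the mask bits):

#include <rte_ethdev.h>

/* Minimal sketch: request VLAN strip + filter on a configured port. The
 * PMD maps these bits to the RTE_ETH_VLAN_*_MASK checks shown above.
 */
static int
enable_vlan_strip_and_filter(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_offload(port_id,
			RTE_ETH_VLAN_STRIP_OFFLOAD |
			RTE_ETH_VLAN_FILTER_OFFLOAD);
}
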
@@ -2587,7 +2587,7 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 	uint32_t reg_val;
 
 	/* only the outer TPID of double VLAN can be configured */
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg_val = IGC_READ_REG(hw, IGC_VET);
 		reg_val = (reg_val & (~IGC_VET_EXT)) |
 			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index 7b6c209df3b6..066792b8a2d8 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -59,38 +59,38 @@ extern "C" {
 #define IGC_TX_MAX_MTU_SEG	UINT8_MAX
 
 #define IGC_RX_OFFLOAD_ALL	(    \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_VLAN_FILTER | \
-	DEV_RX_OFFLOAD_VLAN_EXTEND | \
-	DEV_RX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_RX_OFFLOAD_UDP_CKSUM   | \
-	DEV_RX_OFFLOAD_TCP_CKSUM   | \
-	DEV_RX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_RX_OFFLOAD_JUMBO_FRAME | \
-	DEV_RX_OFFLOAD_KEEP_CRC    | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
-	DEV_TX_OFFLOAD_VLAN_INSERT | \
-	DEV_TX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_TX_OFFLOAD_UDP_CKSUM   | \
-	DEV_TX_OFFLOAD_TCP_CKSUM   | \
-	DEV_TX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_TX_OFFLOAD_TCP_TSO     | \
-	DEV_TX_OFFLOAD_UDP_TSO	   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO     | \
+	RTE_ETH_TX_OFFLOAD_UDP_TSO	   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define IGC_RSS_OFFLOAD_ALL	(    \
-	ETH_RSS_IPV4               | \
-	ETH_RSS_NONFRAG_IPV4_TCP   | \
-	ETH_RSS_NONFRAG_IPV4_UDP   | \
-	ETH_RSS_IPV6               | \
-	ETH_RSS_NONFRAG_IPV6_TCP   | \
-	ETH_RSS_NONFRAG_IPV6_UDP   | \
-	ETH_RSS_IPV6_EX            | \
-	ETH_RSS_IPV6_TCP_EX        | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4               | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP   | \
+	RTE_ETH_RSS_IPV6               | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP   | \
+	RTE_ETH_RSS_IPV6_EX            | \
+	RTE_ETH_RSS_IPV6_TCP_EX        | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IGC_MAX_ETQF_FILTERS		3	/* etqf(3) is used for 1588 */
 #define IGC_ETQF_FILTER_1588		3
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index b5489eedd220..82e7e084b41d 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -127,7 +127,7 @@ struct igc_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /** Offload features */
@@ -209,7 +209,7 @@ struct igc_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 static inline uint64_t
@@ -866,23 +866,23 @@ igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
 }
@@ -1056,10 +1056,10 @@ igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		igc_rss_configure(dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/*
 		 * configure the RSS registers for the following,
 		 * then disable the RSS logic
@@ -1099,7 +1099,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
 
 	/* Configure support of jumbo frames, if any. */
-	if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		rctl |= IGC_RCTL_LPE;
 
 		/*
@@ -1130,7 +1130,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+		rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				RTE_ETHER_CRC_LEN : 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -1196,7 +1196,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	if (dev->data->scattered_rx) {
@@ -1240,20 +1240,20 @@ igc_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= IGC_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= IGC_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_IPOFL;
 
 	if (offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rxcsum |= IGC_RXCSUM_TUOFL;
-		offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
 	} else {
 		rxcsum &= ~IGC_RXCSUM_TUOFL;
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
 		rxcsum |= IGC_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_CRCOFL;
@@ -1261,7 +1261,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1298,12 +1298,12 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
 		dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			dvmolr |= IGC_DVMOLR_STRVLAN;
 		else
 			dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-		if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			dvmolr &= ~IGC_DVMOLR_STRCRC;
 		else
 			dvmolr |= IGC_DVMOLR_STRCRC;
@@ -2272,10 +2272,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 	reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
 	if (on) {
 		reg_val |= IGC_DVMOLR_STRVLAN;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index e6207939665e..824341fee3f6 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -280,37 +280,37 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(link));
 
 	if (adapter->idev.port_info->config.an_enable) {
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	}
 
 	if (!adapter->link_up ||
 	    !(lif->state & IONIC_LIF_F_UP)) {
 		/* Interface is down */
-		link.link_status = ETH_LINK_DOWN;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else {
 		/* Interface is up */
-		link.link_status = ETH_LINK_UP;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		switch (adapter->link_speed) {
 		case  10000:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case  25000:
-			link.link_speed = ETH_SPEED_NUM_25G;
+			link.link_speed = RTE_ETH_SPEED_NUM_25G;
 			break;
 		case  40000:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case  50000:
-			link.link_speed = ETH_SPEED_NUM_50G;
+			link.link_speed = RTE_ETH_SPEED_NUM_50G;
 			break;
 		case 100000:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -397,17 +397,17 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
 
 	dev_info->speed_capa =
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	/*
 	 * Per-queue capabilities
 	 * RTE does not support disabling a feature on a queue if it is
 	 * enabled globally on the device. Thus the driver does not advertise
-	 * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+	 * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
 	 * though the driver would be otherwise capable of disabling it on
 	 * a per-queue basis.
 	 */
@@ -421,25 +421,25 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	 */
 
 	dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
 		0;
 
 	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
 		0;
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -474,9 +474,9 @@ ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		fc_conf->autoneg = 0;
 
 		if (idev->port_info->config.pause_type)
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -498,14 +498,14 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
 		break;
-	case RTE_FC_RX_PAUSE:
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		return -ENOTSUP;
 	}
 
@@ -629,17 +629,17 @@ ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			IONIC_RSS_HASH_KEY_SIZE);
 
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -671,17 +671,17 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (!lif->rss_ind_tbl)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 			rss_types |= IONIC_RSS_TYPE_IPV4;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
-		if (rss_conf->rss_hf & ETH_RSS_IPV6)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 			rss_types |= IONIC_RSS_TYPE_IPV6;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
 
 		ionic_lif_rss_config(lif, rss_types, key, NULL);
@@ -853,15 +853,15 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
 static inline uint32_t
 ionic_parse_link_speeds(uint16_t link_speeds)
 {
-	if (link_speeds & ETH_LINK_SPEED_100G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 		return 100000;
-	else if (link_speeds & ETH_LINK_SPEED_50G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 		return 50000;
-	else if (link_speeds & ETH_LINK_SPEED_40G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		return 40000;
-	else if (link_speeds & ETH_LINK_SPEED_25G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		return 25000;
-	else if (link_speeds & ETH_LINK_SPEED_10G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		return 10000;
 	else
 		return 0;
@@ -885,12 +885,12 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	IONIC_PRINT_CALL();
 
 	allowed_speeds =
-		ETH_LINK_SPEED_FIXED |
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_FIXED |
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	if (dev_conf->link_speeds & ~allowed_speeds) {
 		IONIC_PRINT(ERR, "Invalid link setting");
@@ -907,7 +907,7 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure link */
-	an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+	an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 	ionic_dev_cmd_port_autoneg(idev, an_enable);
 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
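
For reference, the configuration that trips the autoneg check above: setting RTE_ETH_LINK_SPEED_FIXED together with a single speed bit disables autoneg. A minimal sketch (queue counts and the helper name are placeholders):

#include <rte_ethdev.h>

/* Minimal sketch: pin the port to 25G with autoneg off, leaving the rest
 * of rte_eth_conf at its zero defaults.
 */
static int
pin_port_to_25g(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			       RTE_ETH_LINK_SPEED_25G,
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
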
diff --git a/drivers/net/ionic/ionic_ethdev.h b/drivers/net/ionic/ionic_ethdev.h
index 6cbcd0f825a3..652f28c97d57 100644
--- a/drivers/net/ionic/ionic_ethdev.h
+++ b/drivers/net/ionic/ionic_ethdev.h
@@ -8,12 +8,12 @@
 #include <rte_ethdev.h>
 
 #define IONIC_ETH_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
 	(eth_dev)->data->dev_private)
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index 431eda777b78..d4eb6c1d78be 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -1688,12 +1688,12 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
 
 	/*
 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
-	 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
 	 */
-	rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
 		else
 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
@@ -1733,19 +1733,19 @@ ionic_lif_configure(struct ionic_lif *lif)
 	/*
 	 * NB: While it is true that RSS_HASH is always enabled on ionic,
 	 *     setting this flag unconditionally causes problems in DTS.
-	 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	 */
 
 	/* RX per-port */
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 		lif->features |= IONIC_ETH_HW_RX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_RX_CSUM;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		lif->features |= IONIC_ETH_HW_RX_SG;
 		lif->eth_dev->data->scattered_rx = 1;
 	} else {
@@ -1754,30 +1754,30 @@ ionic_lif_configure(struct ionic_lif *lif)
 	}
 
 	/* Covers VLAN_STRIP */
-	ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
 
 	/* TX per-port */
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		lif->features |= IONIC_ETH_HW_TX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_CSUM;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
 	else
 		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		lif->features |= IONIC_ETH_HW_TX_SG;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_SG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		lif->features |= IONIC_ETH_HW_TSO;
 		lif->features |= IONIC_ETH_HW_TSO_IPV6;
 		lif->features |= IONIC_ETH_HW_TSO_ECN;
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index b83ea1bcaa6a..0c1f6113d0e9 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -204,11 +204,11 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 		txq->flags |= IONIC_QCQ_F_DEFERRED;
 
 	/* Convert the offload flags into queue flags */
-	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
-	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
-	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
 
 	eth_dev->data->tx_queues[tx_queue_id] = txq;
@@ -745,11 +745,11 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/*
 	 * Note: the interface does not currently support
-	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
 	 * when the adapter will be able to keep the CRC and subtract
 	 * it from the length of all received packets:
 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
-	 *     DEV_RX_OFFLOAD_KEEP_CRC)
+	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 	 *   rxq->crc_len = ETHER_CRC_LEN;
 	 */
 
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 589d9fa5877d..2f6df2c2f6b8 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -50,11 +50,11 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->speed_capa =
 		(hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
-		ETH_LINK_SPEED_10G :
+		RTE_ETH_LINK_SPEED_10G :
 		((hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
-		ETH_LINK_SPEED_25G :
-		ETH_LINK_SPEED_AUTONEG);
+		RTE_ETH_LINK_SPEED_25G :
+		RTE_ETH_LINK_SPEED_AUTONEG);
 
 	dev_info->max_rx_queues  = 1;
 	dev_info->max_tx_queues  = 1;
@@ -67,31 +67,31 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	};
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 
 	dev_info->dev_capa =
@@ -2410,10 +2410,10 @@ ipn3ke_update_link(struct rte_rawdev *rawdev,
 				(uint64_t *)&link_speed);
 	switch (link_speed) {
 	case IFPGA_RAWDEV_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case IFPGA_RAWDEV_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
 		IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
@@ -2471,9 +2471,9 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2529,9 +2529,9 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2803,10 +2803,10 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
 
 	if (frame_size > IPN3KE_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			(uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
+			(uint64_t)(RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			(uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
+			(uint64_t)(~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b5371568b54d..3707daf4760f 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1865,7 +1865,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= IXGBE_DMATXCTL_GDV;
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (qinq) {
 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
@@ -1880,7 +1880,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    " by single VLAN");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (qinq) {
 			/* Only the high 16-bits is valid */
 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
@@ -1967,10 +1967,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -2091,7 +2091,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			ctrl |= IXGBE_VLNCTRL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -2108,7 +2108,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 				ctrl |= IXGBE_RXDCTL_VME;
 				on = TRUE;
 			} else {
@@ -2130,17 +2130,17 @@ ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct ixgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -2151,19 +2151,19 @@ ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		ixgbe_vlan_hw_strip_config(dev);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2202,10 +2202,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -2229,18 +2229,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -2250,12 +2250,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -2264,12 +2264,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -2284,13 +2284,13 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdb+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2299,15 +2299,15 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2316,39 +2316,39 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
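
For context, the checks above validate what an application requests through
rte_eth_dev_configure(). A minimal application-side sketch with the renamed
DCB values (the port id and one-queue-per-TC layout are illustrative
placeholders, not part of this patch):

#include <rte_ethdev.h>

/* Sketch: request 4-TC DCB on Rx and Tx using the renamed enums. */
static int
configure_dcb_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_DCB },
		.txmode = { .mq_mode = RTE_ETH_MQ_TX_DCB },
	};

	conf.rx_adv_conf.dcb_rx_conf.nb_tcs = RTE_ETH_4_TCS;
	conf.tx_adv_conf.dcb_tx_conf.nb_tcs = RTE_ETH_4_TCS;

	/* One Rx/Tx queue pair per traffic class in this sketch. */
	return rte_eth_dev_configure(port_id, RTE_ETH_4_TCS,
			RTE_ETH_4_TCS, &conf);
}
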
@@ -2357,7 +2357,7 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB) {
 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
 				PMD_INIT_LOG(ERR,
@@ -2381,8 +2381,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = ixgbe_check_mq_mode(dev);
@@ -2627,15 +2627,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -2712,17 +2712,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |  RTE_ETH_LINK_SPEED_5G |
+			RTE_ETH_LINK_SPEED_10G;
 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-			allowed_speeds = ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+			allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 		break;
 	default:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 	}
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
@@ -2736,7 +2736,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
@@ -2754,17 +2754,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
 		}
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= IXGBE_LINK_SPEED_100_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= IXGBE_LINK_SPEED_10_FULL;
 	}
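
The link_speeds handling above consumes an application-supplied bitmap; a
short sketch of populating it with the renamed flags (the chosen speeds are
only for illustration):

#include <rte_ethdev.h>

/* Sketch: allow 1G/10G autonegotiation via the renamed speed flags. */
static void
set_speed_mask(struct rte_eth_conf *conf)
{
	conf->link_speeds = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;

	/* Or pin a fixed speed instead of autonegotiating: */
	/* conf->link_speeds = RTE_ETH_LINK_SPEED_10G |
	 *		RTE_ETH_LINK_SPEED_FIXED; */
}
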
 
@@ -3839,7 +3839,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB)
 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
 	}
@@ -3849,9 +3849,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
@@ -3890,21 +3890,21 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-		dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 
 	if (hw->mac.type == ixgbe_mac_X540 ||
 	    hw->mac.type == ixgbe_mac_X540_vf ||
 	    hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550_vf) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	}
 	if (hw->mac.type == ixgbe_mac_X550) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 	}
 
 	/* Driver-preferred Rx/Tx parameters */
@@ -3973,9 +3973,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -4218,11 +4218,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	u32 esdp_reg;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -4244,8 +4244,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
 	if (diag != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -4281,37 +4281,37 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case IXGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 
 	case IXGBE_LINK_SPEED_10_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 
 	case IXGBE_LINK_SPEED_100_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case IXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
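
On the application side, the renamed status and speed constants are read back
through the link query API; a minimal sketch, assuming a started port (the
port id is a placeholder):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: query and print link state using the renamed constants. */
static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: %u Mbps %s-duplex\n", port_id,
			link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full" : "half");
	else
		printf("port %u: link down\n", port_id);
}
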
 
@@ -4528,7 +4528,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -4747,13 +4747,13 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
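
The get path above maps the pause bits into the renamed rte_eth_fc_mode
values; symmetrically, a sketch of an application toggling full flow control
(error handling kept minimal):

#include <rte_ethdev.h>

/* Sketch: read the current flow control mode, then request full pause. */
static int
enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	if (fc_conf.mode != RTE_ETH_FC_FULL) {
		fc_conf.mode = RTE_ETH_FC_FULL;
		ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}
	return ret;
}
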
@@ -5199,11 +5199,11 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (frame_size > IXGBE_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -5271,22 +5271,22 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
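
Since the VF silently overrides the KEEP_CRC request above, a PF-side sketch
of asking for it only when advertised (the helper name is illustrative):

#include <rte_ethdev.h>

/* Sketch: request KEEP_CRC only when the port reports the capability. */
static void
request_keep_crc(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
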
 
@@ -5346,8 +5346,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	ixgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -5581,10 +5581,10 @@ ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports the HW strip feature; others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
@@ -5715,12 +5715,12 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 		}
@@ -5734,15 +5734,15 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= IXGBE_VMOLR_AUPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= IXGBE_VMOLR_ROMPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= IXGBE_VMOLR_ROPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= IXGBE_VMOLR_BAM;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= IXGBE_VMOLR_MPE;
 
 	return new_val;
@@ -5753,8 +5753,8 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
-	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
-	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+	((mirror_type) & ~(uint8_t)(RTE_ETH_MIRROR_VIRTUAL_POOL_UP | \
+	RTE_ETH_MIRROR_UPLINK_PORT | RTE_ETH_MIRROR_DOWNLINK_PORT | RTE_ETH_MIRROR_VLAN))
 
 static int
 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
@@ -5794,7 +5794,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VLAN) {
 		mirror_type |= IXGBE_MRCTL_VLME;
 		/* Check if vlan id is valid and find corresponding VLAN ID
 		 * index in VLVF
@@ -5827,7 +5827,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 
 			mr_info->mr_conf[rule_id].vlan.vlan_mask =
 						mirror_conf->vlan.vlan_mask;
-			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+			for (i = 0; i < RTE_ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
 				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
 					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
 						mirror_conf->vlan.vlan_id[i];
@@ -5836,7 +5836,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 			mv_lsb = 0;
 			mv_msb = 0;
 			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+			for (i = 0; i < RTE_ETH_VMDQ_MAX_VLAN_FILTERS; i++)
 				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
 		}
 	}
@@ -5845,7 +5845,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	 * if pool mirroring is enabled, write the related pool mask register;
 	 * if disabled, clear the PFMRVM register
 	 */
-	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VIRTUAL_POOL_UP) {
 		mirror_type |= IXGBE_MRCTL_VPME;
 		if (on) {
 			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
@@ -5859,9 +5859,9 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 			mr_info->mr_conf[rule_id].pool_mask = 0;
 		}
 	}
-	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_UPLINK_PORT)
 		mirror_type |= IXGBE_MRCTL_UPME;
-	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_DOWNLINK_PORT)
 		mirror_type |= IXGBE_MRCTL_DPME;
 
 	/* read mirror control register and recalculate it */
@@ -5882,13 +5882,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
 	/* write pool mirror control register */
-	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VIRTUAL_POOL_UP) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
 				mp_msb);
 	}
 	/* write VLAN mirror control register */
-	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VLAN) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
 				mv_msb);
@@ -6266,7 +6266,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set as 0x4.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) &&
 	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
@@ -6942,15 +6942,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = IXGBE_INCVAL_1GB;
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -7361,16 +7361,16 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		return ETH_RSS_RETA_SIZE_512;
+		return RTE_ETH_RSS_RETA_SIZE_512;
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
-		return ETH_RSS_RETA_SIZE_64;
+		return RTE_ETH_RSS_RETA_SIZE_64;
 	case ixgbe_mac_X540_vf:
 	case ixgbe_mac_82599_vf:
 		return 0;
 	default:
-		return ETH_RSS_RETA_SIZE_128;
+		return RTE_ETH_RSS_RETA_SIZE_128;
 	}
 }
 
@@ -7380,10 +7380,10 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		if (reta_idx < ETH_RSS_RETA_SIZE_128)
+		if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
 			return IXGBE_RETA(reta_idx >> 2);
 		else
-			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+			return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
@@ -7439,7 +7439,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -7450,7 +7450,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -7474,9 +7474,9 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7489,7 +7489,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7742,7 +7742,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -7774,7 +7774,7 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -7871,12 +7871,12 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
 
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
@@ -7908,11 +7908,11 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a0ce18ca246b..3443154589e8 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -113,15 +113,15 @@
 #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE    0x0
 
 #define IXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf irq enable mask */
 #define IXGBE_VF_MAXMSIVECTOR           1
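
IXGBE_RSS_OFFLOAD_ALL above is now built from the renamed RTE_ETH_RSS_* bits;
a sketch of an application narrowing the hash set at runtime (the key is left
NULL to keep the device default):

#include <rte_ethdev.h>

/* Sketch: restrict RSS hashing to TCP over IPv4/IPv6. */
static int
set_tcp_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the current key */
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
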
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 27a49bbce5e7..7894047829a8 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -90,9 +90,9 @@ static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 				 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
 			uint32_t fdircmd, uint32_t fdirhash,
@@ -163,20 +163,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
 {
 	*fdirctrl = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
 		break;
@@ -807,13 +807,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_128KB_HASH_MASK;
@@ -850,15 +850,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_128KB_HASH_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 511b612f7fe4..0557de6c1aa5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1259,7 +1259,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index e45c5501e6bf..944c9f23809e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -392,7 +392,7 @@ ixgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -400,7 +400,7 @@ ixgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -633,11 +633,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -657,7 +657,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
@@ -665,7 +665,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index fbf2b17d160f..d03238b728ba 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -107,15 +107,15 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
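
The pool-count bucketing above is driven by what the application requested; a
minimal sketch of asking for a 16-pool VMDq Rx layout with the renamed enum
(the accept flags are chosen only for illustration):

#include <rte_ethdev.h>

/* Sketch: request 16 VMDq Rx pools using the renamed constants. */
static void
setup_vmdq_rx(struct rte_eth_conf *conf)
{
	conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
	conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools = RTE_ETH_16_POOLS;
	conf->rx_adv_conf.vmdq_rx_conf.rx_mode =
		RTE_ETH_VMDQ_ACCEPT_UNTAG | RTE_ETH_VMDQ_ACCEPT_BROADCAST;
}
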
@@ -266,15 +266,15 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
 		gpie |= IXGBE_GPIE_VTMODE_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
 		gpie |= IXGBE_GPIE_VTMODE_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
 		gpie |= IXGBE_GPIE_VTMODE_16;
 		break;
@@ -604,11 +604,11 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 		if (max_frame > IXGBE_ETH_MAX_LEN) {
 			dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_JUMBO_FRAME;
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		} else {
 			dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -684,29 +684,29 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
 		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
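
ixgbe_get_vf_queues() above derives num_tcs from the pool count the
application configured; the corresponding application-side sketch (the
all-priorities-to-TC0 mapping is illustrative):

#include <rte_ethdev.h>

/* Sketch: 16 Tx pools with DCB, every user priority mapped to TC 0. */
static void
setup_vmdq_dcb_tx(struct rte_eth_conf *conf)
{
	struct rte_eth_vmdq_dcb_tx_conf *tx_conf =
		&conf->tx_adv_conf.vmdq_dcb_tx_conf;
	unsigned int i;

	conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
	tx_conf->nb_queue_pools = RTE_ETH_16_POOLS;
	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
		tx_conf->dcb_tc[i] = 0;
}
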
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c814a28cb49a..b5ee83d8edc8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2591,26 +2591,26 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
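
The capability mask built above is what rte_eth_dev_info_get() reports back
to applications; a sketch of gating an offload on it (error handling elided):

#include <rte_ethdev.h>

/* Sketch: enable TCP TSO only if the port reports the capability. */
static void
maybe_enable_tso(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
}
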
@@ -2778,7 +2778,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/*
@@ -3014,7 +3014,7 @@ ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (hw->mac.type != ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return offloads;
 }
@@ -3025,20 +3025,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t offloads;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_JUMBO_FRAME |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_SCATTER |
-		   DEV_RX_OFFLOAD_RSS_HASH;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (ixgbe_is_vf(dev) == 0)
-		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	/*
 	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -3048,20 +3048,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	     hw->mac.type == ixgbe_mac_X540 ||
 	     hw->mac.type == ixgbe_mac_X550) &&
 	    !RTE_ETH_DEV_SRIOV(dev).active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -3116,7 +3116,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -3520,23 +3520,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
@@ -3618,23 +3618,23 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -3710,12 +3710,12 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		ixgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * RXPBSIZE
@@ -3740,7 +3740,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -3749,7 +3749,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	}
 
 	/* MRQC: enable vmdq and dcb */
-	mrqc = (num_pools == ETH_16_POOLS) ?
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
 		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
@@ -3765,7 +3765,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3789,7 +3789,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 16 or 32 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*
 	 * MPSAR - allow pools to read specific mac addresses
@@ -3871,7 +3871,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	if (hw->mac.type != ixgbe_mac_82598EB)
 		/* PF VF Transmit Enable */
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
-			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/* Configure general DCB TX parameters */
 	ixgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3887,12 +3887,12 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3902,7 +3902,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3920,12 +3920,12 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3935,7 +3935,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3962,7 +3962,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3989,7 +3989,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -4158,7 +4158,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		if (hw->mac.type != ixgbe_mac_82598EB) {
 			config_dcb_rx = DCB_RX_CONFIG;
@@ -4171,8 +4171,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			ixgbe_vmdq_dcb_configure(dev);
 		}
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -4185,7 +4185,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -4196,7 +4196,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -4212,15 +4212,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
-			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+			if ((mask & 0x1) && (j < RTE_ETH_DCB_NUM_USER_PRIORITIES))
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -4270,7 +4270,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 		}
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
 		}
 	}
@@ -4286,7 +4286,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
 		}
@@ -4322,7 +4322,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
@@ -4336,7 +4336,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = ixgbe_dcb_pfc_enabled;
 		}
 		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -4357,12 +4357,12 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+	if ((dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB) &&
+	    (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB) &&
+	    (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS))
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -4418,7 +4418,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 64 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
 
 	/*
@@ -4539,11 +4539,11 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
 		break;
 
@@ -4564,17 +4564,17 @@ ixgbe_config_vf_default(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQEN);
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT4TCEN);
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT8TCEN);
 		break;
@@ -4601,21 +4601,21 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			ixgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable RSS mode. */
 			ixgbe_rss_disable(dev);
@@ -4626,18 +4626,18 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as the VMDq case */
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4671,7 +4671,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			ixgbe_vmdq_tx_hw_configure(hw);
 		else {
 			mtqc = IXGBE_MTQC_64Q_1PB;
@@ -4684,13 +4684,13 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
 				IXGBE_MTQC_8TC_8TQ;
 			break;
@@ -4898,7 +4898,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		rxq->rx_using_sse = rx_using_sse;
 #ifdef RTE_LIB_SECURITY
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 #endif
 	}
 }
@@ -4926,10 +4926,10 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4937,8 +4937,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4952,7 +4952,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 	else
 		rfctl |= IXGBE_RFCTL_RSC_DIS;
@@ -4961,7 +4961,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -5082,7 +5082,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -5090,7 +5090,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -5119,7 +5119,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5128,7 +5128,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -5171,11 +5171,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -5190,7 +5190,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -5200,7 +5200,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5406,9 +5406,9 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 #ifdef RTE_LIB_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SECURITY) ||
+			RTE_ETH_RX_OFFLOAD_SECURITY) ||
 		(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY)) {
+			RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = ixgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -5696,7 +5696,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5745,7 +5745,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -5754,8 +5754,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
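[Aside for reviewers: the KEEP_CRC hunks above are pure renames, so the application-side pattern is unchanged. A minimal sketch of enabling the renamed bit, assuming <rte_ethdev.h> is included; this helper and port_id are illustrative, not part of this patch:

static int
keep_crc_configure(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	/* Keep the FCS on received packets; drivers then report
	 * rxq->crc_len as RTE_ETHER_CRC_LEN, as in the hunks above. */
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
]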
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 476ef62cfda2..220efffe4d08 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -133,7 +133,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_udp_csum_zero_err;
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -226,7 +226,7 @@ struct ixgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index adba855ca30f..714707941537 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,7 +278,7 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index a8407e742e6d..c2ab3131f22e 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -119,14 +119,14 @@ ixgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -375,10 +375,10 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -392,7 +392,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index d5b636a19408..536e33010703 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -58,20 +58,20 @@ ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	/**< Maximum number of MAC addresses. */
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |	DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 	/**< Device RX offload capabilities. */
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	/**< Device TX offload capabilities. */
 
 	dev_info->speed_capa =
 		representor->pf_ethdev->data->dev_link.link_speed;
-	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	/**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 
 	dev_info->switch_info.name =
 		representor->pf_ethdev->device->name;
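[Aside for reviewers: the capability bits reported above keep their values, only the names change. A hedged sketch of an application checking them, assuming <rte_ethdev.h> and <errno.h>; the helper is hypothetical:

static int
check_tx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint64_t want = RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			RTE_ETH_TX_OFFLOAD_TCP_TSO;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	return (dev_info.tx_offload_capa & want) == want ? 0 : -ENOTSUP;
}
]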
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index cf089cd9aee5..9729f8575f53 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -303,10 +303,10 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
 	 */
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_16_POOLS;
+				  RTE_ETH_16_POOLS;
 	else
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_64_POOLS;
+				  RTE_ETH_64_POOLS;
 
 	for (q = 0; q < queues_per_pool; q++)
 		(*dev->dev_ops->vlan_strip_queue_set)(dev,
@@ -736,14 +736,14 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
 	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 	eth_conf = &dev->data->dev_conf;
 
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 90fc8160b1f8..bad6691648a1 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -285,8 +285,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
 * @param rx_mask
 *    The RX mode mask, which is one or more of accepting Untagged Packets,
 *    packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-*    ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-*    ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+*    RTE_ETH_VMDQ_ACCEPT_UNTAG, RTE_ETH_VMDQ_ACCEPT_HASH_UC,
+*    RTE_ETH_VMDQ_ACCEPT_BROADCAST and RTE_ETH_VMDQ_ACCEPT_MULTICAST will be used
 *    in rx_mode.
 * @param on
 *    1 - Enable a VF RX mode.
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index 871d11c4133d..29060ca76f93 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,10 @@ struct pmd_internals {
 };
 
 static const struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 static int is_kni_initialized;
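[Aside for reviewers: struct rte_eth_link keeps its semantics under the new names. An illustrative reader of the link status, assuming <stdio.h> and <rte_ethdev.h>; not part of this diff:

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP)
		printf("port %u up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
}
]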
 
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index b72060a4499b..118170670fbb 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -384,15 +384,15 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		break;
 	/* CN23xx 25G cards */
 	case PCI_SUBSYS_DEV_ID_CN2350_225:
 	case PCI_SUBSYS_DEV_ID_CN2360_225:
-		devinfo->speed_capa = ETH_LINK_SPEED_25G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		break;
 	default:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		lio_dev_err(lio_dev,
 			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
 		return -EINVAL;
@@ -406,27 +406,27 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	devinfo->max_mac_addrs = 1;
 
-	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_RX_OFFLOAD_UDP_CKSUM		|
-				    DEV_RX_OFFLOAD_TCP_CKSUM		|
-				    DEV_RX_OFFLOAD_VLAN_STRIP		|
-				    DEV_RX_OFFLOAD_RSS_HASH);
-	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_TX_OFFLOAD_UDP_CKSUM		|
-				    DEV_TX_OFFLOAD_TCP_CKSUM		|
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+	devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH);
+	devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
 
 	devinfo->rx_desc_lim = lio_rx_desc_lim;
 	devinfo->tx_desc_lim = lio_tx_desc_lim;
 
 	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
 	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
-	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
-					   ETH_RSS_NONFRAG_IPV4_TCP	|
-					   ETH_RSS_IPV6			|
-					   ETH_RSS_NONFRAG_IPV6_TCP	|
-					   ETH_RSS_IPV6_EX		|
-					   ETH_RSS_IPV6_TCP_EX);
+	devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4			|
+					   RTE_ETH_RSS_NONFRAG_IPV4_TCP	|
+					   RTE_ETH_RSS_IPV6			|
+					   RTE_ETH_RSS_NONFRAG_IPV6_TCP	|
+					   RTE_ETH_RSS_IPV6_EX		|
+					   RTE_ETH_RSS_IPV6_TCP_EX);
 	return 0;
 }
 
@@ -483,10 +483,10 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 
 	if (frame_len > LIO_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
 	eth_dev->data->mtu = mtu;
@@ -616,17 +616,17 @@ lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
 
 	if (rss_state->ip)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (rss_state->tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (rss_state->ipv6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (rss_state->ipv6_tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (rss_state->ipv6_ex)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (rss_state->ipv6_tcp_ex_hash)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -694,42 +694,42 @@ lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (rss_state->hash_disable)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 			hashinfo |= LIO_RSS_HASH_IPV4;
 			rss_state->ip = 1;
 		} else {
 			rss_state->ip = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
 			rss_state->tcp_hash = 1;
 		} else {
 			rss_state->tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {
 			hashinfo |= LIO_RSS_HASH_IPV6;
 			rss_state->ipv6 = 1;
 		} else {
 			rss_state->ipv6 = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
 			rss_state->ipv6_tcp_hash = 1;
 		} else {
 			rss_state->ipv6_tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {
 			hashinfo |= LIO_RSS_HASH_IPV6_EX;
 			rss_state->ipv6_ex = 1;
 		} else {
 			rss_state->ipv6_ex = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
 			rss_state->ipv6_tcp_ex_hash = 1;
 		} else {
@@ -778,7 +778,7 @@ lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -835,7 +835,7 @@ lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -933,10 +933,10 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	/* Initialize */
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	/* Return what we found */
 	if (lio_dev->linfo.link.s.link_up == 0) {
@@ -944,18 +944,18 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		return rte_eth_linkstatus_set(eth_dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP; /* Interface is up */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP; /* Interface is up */
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	switch (lio_dev->linfo.link.s.speed) {
 	case LIO_LINK_SPEED_10000:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case LIO_LINK_SPEED_25000:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	}
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -1124,10 +1124,10 @@ lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rss_conf rss_conf;
 
 	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		lio_dev_rss_configure(eth_dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 	/* if mq_mode is none, disable rss mode. */
 	default:
 		memset(&rss_conf, 0, sizeof(rss_conf));
@@ -1509,7 +1509,7 @@ lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 1;
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -1530,11 +1530,11 @@ lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 0;
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
 		lio_dev->linfo.link.s.link_up = 1;
-		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		lio_dev_err(lio_dev, "Unable to set Link Down\n");
 		return -1;
 	}
@@ -1746,9 +1746,9 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Inform firmware about change in number of queues to use.
 	 * Disable IO queues and reset registers for re-configuration.
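[Aside for reviewers: the RSS and mq_mode hunks in this file map one to one onto the new names. A port configuration requesting RSS with the renamed constants would look as below; a sketch only, not part of the patch:

static const struct rte_eth_conf rss_port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* PMD default key */
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
		},
	},
};
]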
diff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c
index f58ff4c0cb77..a117a05228fc 100644
--- a/drivers/net/memif/memif_socket.c
+++ b/drivers/net/memif/memif_socket.c
@@ -525,7 +525,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
 
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index de6becd45e3e..ea66f5bfd452 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -55,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION		"memif_mp_send_region"
@@ -1216,7 +1216,7 @@ memif_connect(struct rte_eth_dev *dev)
 
 		pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 		pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	}
 	MIF_LOG(INFO, "Connected.");
 	return 0;
@@ -1367,10 +1367,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		proc_private = dev->process_private;
-		if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+		if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
 				proc_private->regions_num == 0) {
 			memif_mp_request_regions(dev);
-		} else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+		} else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
 				proc_private->regions_num > 0) {
 			memif_free_regions(dev);
 		}
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 783ff94dce8d..d606ec8ca76d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -657,11 +657,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->if_index = priv->if_index;
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
 	info->speed_capa =
-			ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_20G |
-			ETH_LINK_SPEED_40G |
-			ETH_LINK_SPEED_56G;
+			RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_20G |
+			RTE_ETH_LINK_SPEED_40G |
+			RTE_ETH_LINK_SPEED_56G;
 	info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
 
 	return 0;
@@ -821,13 +821,13 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		dev_link.link_speed = link_speed;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	dev->data->dev_link = dev_link;
 	return 0;
 }
@@ -863,13 +863,13 @@ mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	ret = 0;
 out:
 	MLX4_ASSERT(ret >= 0);
@@ -899,13 +899,13 @@ mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
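[Aside for reviewers: RTE_FC_* to RTE_ETH_FC_* is the same mechanical rename and the ethtool mapping above is untouched. On the application side the new names would be used like this; illustrative helper, assuming <rte_ethdev.h>:

static int
enable_pause_frames(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);

	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_FULL;	/* pause in both directions */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
]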
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 71ea91b3fb82..2e1b6c87e983 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -109,21 +109,21 @@ mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
 	};
 	static const uint64_t dpdk[] = {
 		[INNER] = 0,
-		[IPV4] = ETH_RSS_IPV4,
-		[IPV4_1] = ETH_RSS_FRAG_IPV4,
-		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IPV6] = ETH_RSS_IPV6,
-		[IPV6_1] = ETH_RSS_FRAG_IPV6,
-		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IPV6_3] = ETH_RSS_IPV6_EX,
+		[IPV4] = RTE_ETH_RSS_IPV4,
+		[IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
+		[IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IPV6] = RTE_ETH_RSS_IPV6,
+		[IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
+		[IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IPV6_3] = RTE_ETH_RSS_IPV6_EX,
 		[TCP] = 0,
 		[UDP] = 0,
-		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
-		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
-		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
-		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
-		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
-		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+		[IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+		[IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+		[IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+		[IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
+		[IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+		[IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
 	};
 	static const uint64_t verbs[RTE_DIM(dpdk)] = {
 		[INNER] = IBV_RX_HASH_INNER,
@@ -1283,7 +1283,7 @@ mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1358,7 +1358,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 	struct rte_ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
 		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
+		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 		!ETH_DEV(priv)->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index d56009c41845..2aab0f60a7b5 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -118,7 +118,7 @@ mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
 static void
 mlx4_link_status_alarm(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	MLX4_ASSERT(priv->intr_alarm == 1);
@@ -183,7 +183,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
 	};
 	uint32_t caught[RTE_DIM(type)] = { 0 };
 	struct ibv_async_event event;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	unsigned int i;
 
@@ -280,7 +280,7 @@ mlx4_intr_uninstall(struct mlx4_priv *priv)
 int
 mlx4_intr_install(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	int rc;
 
@@ -386,7 +386,7 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx4_rxq_intr_enable(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 978cbb8201ea..9977c761880a 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -682,13 +682,13 @@ mlx4_rxq_detach(struct rxq *rxq)
 uint64_t
 mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
-			    DEV_RX_OFFLOAD_KEEP_CRC |
-			    DEV_RX_OFFLOAD_JUMBO_FRAME |
-			    DEV_RX_OFFLOAD_RSS_HASH;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+			    RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+			    RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (priv->hw_csum)
-		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return offloads;
 }
 
@@ -704,7 +704,7 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 uint64_t
 mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	(void)priv;
 	return offloads;
@@ -785,7 +785,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
 	crc_present = 0;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (priv->hw_fcs_strip) {
 			crc_present = 1;
 		} else {
@@ -816,9 +816,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -831,7 +831,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
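[Aside for reviewers: the scatter and jumbo bits handled above are plain renames of the DEV_RX_OFFLOAD_* flags. A hedged configuration sketch; the helper, queue counts, and frame size are assumptions:

static int
enable_jumbo(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER |
				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
	conf.rxmode.max_rx_pkt_len = 9000;	/* arbitrary jumbo size */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
]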
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 2df26842fbe4..19feec5e5202 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -273,20 +273,20 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 uint64_t
 mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+	uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (priv->hw_csum) {
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	}
 	if (priv->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (priv->hw_csum_l2tun) {
-		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (priv->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	}
 	return offloads;
 }
@@ -394,12 +394,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					   DEV_TX_OFFLOAD_UDP_CKSUM |
-					   DEV_TX_OFFLOAD_TCP_CKSUM)),
+			(offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
 			      (offloads &
-			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+			       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index f34133e2c641..79e27fe2d668 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -439,24 +439,24 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	else
 		dev_link.link_speed = link_speed;
 	priv->link_speed_capa = 0;
 	if (edata.supported & (SUPPORTED_1000baseT_Full |
 			       SUPPORTED_1000baseKX_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (edata.supported & SUPPORTED_10000baseKR_Full)
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
 			       SUPPORTED_40000baseCR4_Full |
 			       SUPPORTED_40000baseSR4_Full |
 			       SUPPORTED_40000baseLR4_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -545,45 +545,45 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		return ret;
 	}
 	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
-				ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+				RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
 	sc = ecmd->link_mode_masks[0] |
 		((uint64_t)ecmd->link_mode_masks[1] << 32);
 	priv->link_speed_capa = 0;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	sc = ecmd->link_mode_masks[2] |
 		((uint64_t)ecmd->link_mode_masks[3] << 32);
@@ -591,11 +591,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		  MLX5_BITSHIFT
 		       (ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -677,13 +677,13 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -709,14 +709,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
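[Aside for reviewers: the speed_capa bits filled in above stay a bitmap, only spelled RTE_ETH_LINK_SPEED_* now. An application probing it, as an illustrative sketch assuming <stdio.h> and <rte_ethdev.h>:

static void
report_100g(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;
	if (dev_info.speed_capa & RTE_ETH_LINK_SPEED_100G)
		printf("port %u supports 100G\n", port_id);
}
]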
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 5f8766aa481e..c40cda8fcaf9 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1343,8 +1343,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
@@ -1627,7 +1627,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/*
 	 * If HW has bug working with tunnel packet decapsulation and
 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
 	 */
 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
 		config->hw_fcs_strip = 0;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index f84e061fe719..ff1c8e17460a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1463,10 +1463,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
 			 struct rte_eth_udp_tunnel *udp_tunnel)
 {
 	MLX5_ASSERT(udp_tunnel != NULL);
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
 	    udp_tunnel->udp_port == 4789)
 		return 0;
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
 	    udp_tunnel->udp_port == 4790)
 		return 0;
 	return -ENOTSUP;
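[Aside for reviewers: the tunnel-type check above only accepts the default ports; the caller-side request with the renamed enum would read as follows. Sketch only, the helper is hypothetical:

static int
add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
		.udp_port = 4789,	/* IANA-assigned VXLAN port */
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
]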
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e02714e23196..9588dff05180 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1226,7 +1226,7 @@ TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
 struct mlx5_flow_rss_desc {
 	uint32_t level;
 	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index fe86bb40d351..12ddf4c7ff28 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -90,11 +90,11 @@
 #define MLX5_VPMD_DESCS_PER_LOOP      4
 
 /* Mask of RSS on source only or destination only. */
-#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
-			       ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+			       RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
 			    MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
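[Aside for reviewers: the SRC/DST-only masks above gate what an application may request through rss_hf. A request that this PMD would accept, written with the new names; illustrative helper, assuming <rte_ethdev.h>:

static int
rss_on_src_ip_only(uint16_t port_id)
{
	struct rte_eth_rss_conf rss = {
		.rss_key = NULL,	/* keep the current key */
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_L3_SRC_ONLY,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss);
}
]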
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 82e2284d9866..f2b78c3cc69e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -91,7 +91,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	if ((dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
+			RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
 			rte_mbuf_dyn_tx_timestamp_register(NULL, NULL) != 0) {
 		DRV_LOG(ERR, "port %u cannot register Tx timestamp field/flag",
 			dev->data->port_id);
@@ -225,8 +225,8 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->default_txportconf.ring_size = 256;
 	info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
 	info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
-	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
-		(priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
+	if ((priv->link_speed_capa & RTE_ETH_LINK_SPEED_200G) |
+		(priv->link_speed_capa & RTE_ETH_LINK_SPEED_100G)) {
 		info->default_rxportconf.nb_queues = 16;
 		info->default_txportconf.nb_queues = 16;
 		if (dev->data->nb_rx_queues > 2 ||
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4762fa0f5f88..7048fff3883e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
 	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
 	 */
 	uint64_t node_flags;
 	/**<
@@ -272,7 +272,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -522,8 +522,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -531,11 +531,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -546,8 +546,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_GRE,
 			 MLX5_EXPANSION_NVGRE),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -555,11 +555,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_VXLAN] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -612,32 +612,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
 						  MLX5_EXPANSION_IPV4_TCP),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_IPV4_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
 						  MLX5_EXPANSION_IPV6_TCP,
 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_IPV6_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
@@ -1048,7 +1048,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1601,14 +1601,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
-	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-	    !(rss->types & ETH_RSS_IP))
+	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & RTE_ETH_RSS_IP))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L3 partial RSS requested but L3 RSS"
 					  " type not specified");
-	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
@@ -6364,8 +6364,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 * mlx5_flow_hashfields_adjust() in advance.
 		 */
 		rss_desc->level = rss->level;
-		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	}
 	flow->dev_handles = 0;
 	if (rss && rss->types) {
@@ -6989,7 +6989,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	if (!priv->reta_idx_n || !priv->rxqs_n) {
 		return 0;
 	}
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
@@ -8657,7 +8657,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 				(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 			ctx->action_rss.types = 0;
 		for (i = 0; i != priv->reta_idx_n; ++i)
 			ctx->queue[i] = (*priv->reta_idx)[i];
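[Aside for reviewers: rss->types in the validation above comes straight from the rte_flow RSS action, so flow rules pick up the rename too. A fragment as it might appear in an application's flow-setup routine; the queue list is an assumption:

	static const uint16_t queues[] = { 0, 1 };
	struct rte_flow_action_rss rss_action = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
]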
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 76ad53f2a1e8..d5d3a89374fe 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -328,18 +328,18 @@ enum mlx5_feature_name {
 
 /* Valid layer type for IPV4 RSS. */
 #define MLX5_IPV4_LAYER_TYPES \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-	 ETH_RSS_NONFRAG_IPV4_OTHER)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
 
 /* IBV hash source bits  for IPV4. */
 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 
 /* Valid layer type for IPV6 RSS. */
 #define MLX5_IPV6_LAYER_TYPES \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
-	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX  | ETH_RSS_IPV6_TCP_EX | \
-	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
+	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 /* IBV hash source bits  for IPV6. */
 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 3f6f5dcfbadb..02a337dc2c93 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10934,9 +10934,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
 			else
 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10944,9 +10944,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
 			else
 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10960,11 +10960,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		return;
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-		if (rss_types & ETH_RSS_UDP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_UDP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_UDP;
 			else
@@ -10972,11 +10972,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		}
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-		if (rss_types & ETH_RSS_TCP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_TCP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_TCP;
 			else
@@ -14495,9 +14495,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4:
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV4;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV4;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV4;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14506,9 +14506,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV6:
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV6;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV6;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV6;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14517,11 +14517,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_UDP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_UDP:
-		if (rss_types & ETH_RSS_UDP) {
+		if (rss_types & RTE_ETH_RSS_UDP) {
 			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
 			else
 				*hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14530,11 +14530,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_TCP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_TCP:
-		if (rss_types & ETH_RSS_TCP) {
+		if (rss_types & RTE_ETH_RSS_TCP) {
 			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
 			else
 				*hash_field |= MLX5_TCP_IBV_RX_HASH;
@@ -14682,8 +14682,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 	origin = &shared_rss->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+	/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+	origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index b93fd4d2c962..ef286a13729c 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1834,7 +1834,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_TCP,
+					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
 					 (IBV_RX_HASH_SRC_PORT_TCP |
 					  IBV_RX_HASH_DST_PORT_TCP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1847,7 +1847,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_UDP,
+					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
 					 (IBV_RX_HASH_SRC_PORT_UDP |
 					  IBV_RX_HASH_DST_PORT_UDP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index c32129cdc2b8..1ee014776643 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -68,7 +68,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 		if (!(*priv->rxqs)[i])
 			continue;
 		(*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
-			!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+			!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
 		++idx;
 	}
 	return 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index abd8ce798986..0d6c58f47d89 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -333,23 +333,23 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
-	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_TIMESTAMP |
-			     DEV_RX_OFFLOAD_JUMBO_FRAME |
-			     DEV_RX_OFFLOAD_RSS_HASH);
+	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
 	if (!config->mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
-		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (config->hw_csum)
-		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-			     DEV_RX_OFFLOAD_UDP_CKSUM |
-			     DEV_RX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -363,7 +363,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 uint64_t
 mlx5_get_rx_port_offloads(void)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	return offloads;
 }
@@ -695,7 +695,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				    dev->data->dev_conf.rxmode.offloads;
 
 		/* The offloads should be checked on rte_eth_dev layer. */
-		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
 			DRV_LOG(ERR, "port %u queue index %u split "
 				     "offload not configured",
@@ -1329,7 +1329,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
-	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
 	unsigned int max_rx_pkt_len = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1431,7 +1431,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
-	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
@@ -1475,7 +1475,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			config->mprq.stride_size_n : mprq_stride_size;
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_scatter_en =
-				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
+				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pkt_len,
@@ -1490,7 +1490,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1551,9 +1551,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* Configure Rx timestamp. */
-	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
 	tmpl->rxq.timestamp_rx_flag = 0;
 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
 			&tmpl->rxq.timestamp_offset,
@@ -1562,11 +1562,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
@@ -1596,7 +1596,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.crc_present << 2);
 	/* Save port ID. */
 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
-		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = rx_seg[0].mp;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 93b4f517bb3e..65d91bdf67e2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -16,10 +16,10 @@
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
-	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
-	 DEV_TX_OFFLOAD_UDP_CKSUM | \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 
 /*
  * Compile time sanity check for vectorized functions.
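Note (sketch, not part of the diff): capability masks such as
MLX5_VEC_TX_CKSUM_OFFLOAD_CAP above are plain bit-ORs of the
RTE_ETH_TX_OFFLOAD_* flags, so validating a requested offload set against a
capability mask is a single expression. The helper name below is
hypothetical.

#include <stdint.h>

/* True when every requested offload bit is covered by 'caps'; the
 * usual validation pattern for these bitmask capabilities, e.g.
 * tx_offloads_supported(offloads, MLX5_VEC_TX_CKSUM_OFFLOAD_CAP). */
static inline int
tx_offloads_supported(uint64_t requested, uint64_t caps)
{
	return (requested & ~caps) == 0;
}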
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index df671379e46d..12aeba60348a 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -523,36 +523,36 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	unsigned int diff = 0, olx = 0, i, m;
 
 	MLX5_ASSERT(priv);
-	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
 		olx |= MLX5_TXOFF_CONFIG_MULTI;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			   DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
 		/* We should support TCP Send Offload. */
 		olx |= MLX5_TXOFF_CONFIG_TSO;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support Software Parser for Tunnels. */
 		olx |= MLX5_TXOFF_CONFIG_SWP;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support IP/TCP/UDP Checksums. */
 		olx |= MLX5_TXOFF_CONFIG_CSUM;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
 		/* We should support VLAN insertion. */
 		olx |= MLX5_TXOFF_CONFIG_VLAN;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
 	    rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
 	    rte_mbuf_dynfield_lookup
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index eb4d34ca559e..06cdeba662bc 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -98,35 +98,35 @@ uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_VLAN_INSERT);
+	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	struct mlx5_dev_config *config = &priv->config;
 
 	if (config->hw_csum)
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (config->tx_pp)
-		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 	if (config->swp) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	}
 	if (!config->mprq.enabled)
-		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -801,17 +801,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int inlen_mode; /* Minimal required Inline data. */
 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
-	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					    DEV_TX_OFFLOAD_IP_TNL_TSO |
-					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	bool vlan_inline;
 	unsigned int temp;
 
 	txq_ctrl->txq.fast_free =
-		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
-		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
 		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
@@ -870,7 +870,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	 * tx_burst routine.
 	 */
 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
-	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
 		      !config->hw_vlan_insert;
 	/*
 	 * If there are few Tx queues it is prioritized
@@ -979,9 +979,9 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		txq_ctrl->txq.tso_en = 1;
 	}
 	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
-	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
-				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
-				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+	txq_ctrl->txq.swp_en = ((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
 				txq_ctrl->txq.offloads) && config->swp;
 }
 
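Note (application-side sketch, not part of the diff): the fast_free
condition above means RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE takes effect only
without multi-segment Tx (and without MPRQ). With the renamed flags a
caller would request it roughly like this; port_id is assumed to refer to
an already-probed port.

#include <rte_ethdev.h>

/* Sketch: enable fast free only when advertised and multi-seg Tx is
 * not requested, matching the txq fast_free condition above. */
static void
request_fast_free(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return;
	if ((info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
	    !(conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
}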
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 60f97f2d2d1f..07792fc5d94f 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -142,9 +142,9 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-				       DEV_RX_OFFLOAD_VLAN_STRIP);
+				       RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (!priv->config.hw_vlan_strip) {
 			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 7e1df1c75147..578816fe0513 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -464,8 +464,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
index a3ee15020466..37803fe34538 100644
--- a/drivers/net/mvneta/mvneta_ethdev.c
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -114,7 +114,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 	struct mvneta_priv *priv = dev->data->dev_private;
 	struct neta_ppio_params *ppio_params;
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
 		MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		if (dev->data->nb_rx_queues > 1)
@@ -126,11 +126,11 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 MRVL_NETA_ETH_HDRS_LEN;
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ppio_params = &priv->ppio_params;
@@ -155,10 +155,10 @@ static int
 mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 		   struct rte_eth_dev_info *info)
 {
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G;
 
 	info->max_rx_queues = MRVL_NETA_RXQ_MAX;
 	info->max_tx_queues = MRVL_NETA_TXQ_MAX;
@@ -510,28 +510,28 @@ mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 
 	neta_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
index ef8067790f82..ccd47e8f4927 100644
--- a/drivers/net/mvneta/mvneta_ethdev.h
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -54,15 +54,15 @@
 #define MRVL_NETA_MRU_TO_MTU(mru)	((mru) - MRVL_NETA_HDRS_LEN)
 
 /** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \
-			    DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+			    RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Tx offloads capabilities */
-#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				    DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MVNETA_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS)
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 				PKT_TX_TCP_CKSUM | \
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
index dfa7ecc09039..d28125ce9635 100644
--- a/drivers/net/mvneta/mvneta_rxtx.c
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -735,7 +735,7 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	rxq->priv = priv;
 	rxq->mp = mp;
 	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
-			     DEV_RX_OFFLOAD_IPV4_CKSUM;
+			     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	rxq->size = desc;
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 078aefbb8da4..539e196b807e 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -58,16 +58,16 @@
 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
 
 /** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
-			  DEV_RX_OFFLOAD_JUMBO_FRAME | \
-			  DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			  RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+			  RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Port Tx offloads capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				  DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				  DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
-			  DEV_TX_OFFLOAD_MULTI_SEGS)
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 			      PKT_TX_TCP_CKSUM | \
@@ -443,14 +443,14 @@ mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
 
 	if (rss_conf->rss_hf == 0) {
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
-	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_2_TUPLE;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 1;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 0;
@@ -484,8 +484,8 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
-	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		return -EINVAL;
@@ -496,7 +496,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 MRVL_PP2_ETH_HDRS_LEN;
 		if (dev->data->mtu > priv->max_mtu) {
@@ -508,7 +508,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
@@ -530,7 +530,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 
 	if (dev->data->nb_rx_queues == 1 &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
 		priv->configured = 1;
@@ -632,7 +632,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		return 0;
 	}
 
@@ -653,7 +653,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -673,14 +673,14 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 	ret = pp2_ppio_disable(priv->ppio);
 	if (ret)
 		return ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -902,7 +902,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->all_multicast == 1)
 		mrvl_allmulticast_enable(dev);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = mrvl_populate_vlan_table(dev, 1);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to populate VLAN table");
@@ -938,11 +938,11 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 		priv->flow_ctrl = 0;
 	}
 
-	if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 		ret = mrvl_dev_set_link_up(dev);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to set link up");
-			dev->data->dev_link.link_status = ETH_LINK_DOWN;
+			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 			goto out;
 		}
 	}
@@ -1211,30 +1211,30 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case SPEED_10000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 	pp2_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -1718,11 +1718,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
 	struct mrvl_priv *priv = dev->data->dev_private;
 
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G |
-			   ETH_LINK_SPEED_10G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G |
+			   RTE_ETH_LINK_SPEED_10G;
 
 	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
 	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
@@ -1742,9 +1742,9 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 	info->tx_offload_capa = MRVL_TX_OFFLOADS;
 	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
 
-	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-				       ETH_RSS_NONFRAG_IPV4_TCP |
-				       ETH_RSS_NONFRAG_IPV4_UDP;
+	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	/* By default packets are dropped if no descriptors are available */
 	info->default_rxconf.rx_drop_en = 1;
@@ -1873,13 +1873,13 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
 		return -ENOTSUP;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = mrvl_populate_vlan_table(dev, 1);
 		else
 			ret = mrvl_populate_vlan_table(dev, 0);
@@ -1888,7 +1888,7 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			return ret;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
 		MRVL_LOG(ERR, "Extend VLAN not supported\n");
 		return -ENOTSUP;
 	}
@@ -2033,7 +2033,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -2189,7 +2189,7 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return ret;
 	}
 
-	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
 
 	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
 	if (ret) {
@@ -2198,10 +2198,10 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	if (en) {
-		if (fc_conf->mode == RTE_FC_NONE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+		if (fc_conf->mode == RTE_ETH_FC_NONE)
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 	}
 
 	return 0;
@@ -2247,19 +2247,19 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		rx_en = 1;
 		tx_en = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		rx_en = 0;
 		tx_en = 1;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		rx_en = 1;
 		tx_en = 0;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		rx_en = 0;
 		tx_en = 0;
 		break;
@@ -2336,11 +2336,11 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hash_type == PP2_PPIO_HASH_T_NONE)
 		rss_conf->rss_hf = 0;
 	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
-		rss_conf->rss_hf = ETH_RSS_IPV4;
+		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	return 0;
 }
@@ -3159,7 +3159,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 	eth_dev->dev_ops = &mrvl_ops;
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_eth_dev_probing_finish(eth_dev);
 	return 0;
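Note (sketch, not part of the diff): the RTE_ETH_FC_* modes decompose into
two independent pause enables, which is all the switch in
mrvl_flow_ctrl_set() above computes. An equivalent mapping, for reference:

#include <rte_ethdev.h>

static void
fc_mode_to_pause(enum rte_eth_fc_mode mode, int *rx_en, int *tx_en)
{
	/* Rx pause is honored in RX_PAUSE and FULL ... */
	*rx_en = (mode == RTE_ETH_FC_RX_PAUSE || mode == RTE_ETH_FC_FULL);
	/* ... Tx pause in TX_PAUSE and FULL; NONE clears both. */
	*tx_en = (mode == RTE_ETH_FC_TX_PAUSE || mode == RTE_ETH_FC_FULL);
}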
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 9e2a40597349..15645f1e5d2a 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -40,16 +40,16 @@
 #include "hn_nvs.h"
 #include "ndis.h"
 
-#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
-			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-			    DEV_TX_OFFLOAD_TCP_TSO    | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS | \
-			    DEV_TX_OFFLOAD_VLAN_INSERT)
+#define HN_TX_OFFLOAD_CAPS (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			    RTE_ETH_TX_OFFLOAD_TCP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_TCP_TSO    | \
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+			    RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 
-#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
-			    DEV_RX_OFFLOAD_VLAN_STRIP | \
-			    DEV_RX_OFFLOAD_RSS_HASH)
+#define HN_RX_OFFLOAD_CAPS (RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+			    RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NETVSC_ARG_LATENCY "latency"
 #define NETVSC_ARG_RXBREAK "rx_copybreak"
@@ -238,21 +238,21 @@ hn_dev_link_update(struct rte_eth_dev *dev,
 	hn_rndis_get_linkspeed(hv);
 
 	link = (struct rte_eth_link) {
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_autoneg = ETH_LINK_SPEED_FIXED,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 		.link_speed = hv->link_speed / 10000,
 	};
 
 	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (old.link_status == link.link_status)
 		return 0;
 
 	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
-		     (link.link_status == ETH_LINK_UP) ? "up" : "down");
+		     (link.link_status == RTE_ETH_LINK_UP) ? "up" : "down");
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -263,14 +263,14 @@ static int hn_dev_info_get(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	int rc;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = HN_MAX_XFER_LEN;
 	dev_info->max_mac_addrs  = 1;
 
 	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
 	dev_info->flow_type_rss_offloads = hv->rss_offloads;
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 
 	dev_info->max_rx_queues = hv->max_queues;
 	dev_info->max_tx_queues = hv->max_queues;
@@ -362,17 +362,17 @@ static void hn_rss_hash_init(struct hn_data *hv,
 	/* Convert from DPDK RSS hash flags to NDIS hash flags */
 	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
 
-	if (rss_conf->rss_hf & ETH_RSS_IPV4)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 		hv->rss_hash |= NDIS_HASH_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 		hv->rss_hash |=  NDIS_HASH_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX)
 		hv->rss_hash |=  NDIS_HASH_IPV6_EX;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
 
 	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
@@ -427,22 +427,22 @@ static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	if (hv->rss_hash & NDIS_HASH_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_IPV4;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_IPV6;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_EX;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	return 0;
 }
@@ -686,8 +686,8 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
 	if (unsupported) {
@@ -705,7 +705,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	hv->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	err = hn_rndis_conf_offload(hv, txmode->offloads,
 				    rxmode->offloads);
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index e3f7e636d731..cacb30385404 100644
--- a/drivers/net/netvsc/hn_rndis.c
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -710,15 +710,15 @@ hn_rndis_query_rsscaps(struct hn_data *hv,
 
 	hv->rss_offloads = 0;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
-		hv->rss_offloads |= ETH_RSS_IPV4
-			| ETH_RSS_NONFRAG_IPV4_TCP
-			| ETH_RSS_NONFRAG_IPV4_UDP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV4
+			| RTE_ETH_RSS_NONFRAG_IPV4_TCP
+			| RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
-		hv->rss_offloads |= ETH_RSS_IPV6
-			| ETH_RSS_NONFRAG_IPV6_TCP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6
+			| RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
-		hv->rss_offloads |= ETH_RSS_IPV6_EX
-			| ETH_RSS_IPV6_TCP_EX;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6_EX
+			| RTE_ETH_RSS_IPV6_TCP_EX;
 
 	/* Commit! */
 	*rxr_cnt0 = rxr_cnt;
@@ -800,7 +800,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -812,7 +812,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
 		    == NDIS_RXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
@@ -826,7 +826,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
 			params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -839,7 +839,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
 			params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
@@ -851,21 +851,21 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
 		    == NDIS_TXCSUM_CAP_IP4)
 			params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
 			goto unsupported;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
 			params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
 			params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
 		else
@@ -907,41 +907,41 @@ int hn_rndis_get_offload(struct hn_data *hv,
 		return error;
 	}
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
 	    == HN_NDIS_TXCSUM_CAP_IP4)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
 	    == HN_NDIS_TXCSUM_CAP_TCP4 &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
 	    == HN_NDIS_TXCSUM_CAP_TCP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 
 	if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
 	    (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
 	    == HN_NDIS_LSOV2_CAP_IP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				    DEV_RX_OFFLOAD_RSS_HASH;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 
 	return 0;
 }
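Note (sketch, not part of the diff): the netvsc hunks above convert between
the renamed RTE_ETH_RSS_* flags and the driver's internal NDIS_HASH_* bits
with mirrored if-chains (hn_rss_hash_init(), hn_rss_hash_conf_get(),
hn_rndis_query_rsscaps()); the same mapping could be table-driven. All
names below come from the hunks above; the NDIS_HASH_* macros live in the
driver's ndis.h.

#include <stdint.h>

static const struct {
	uint64_t rss;	/* RTE_ETH_RSS_* */
	uint32_t ndis;	/* NDIS_HASH_* */
} hn_rss_map[] = {
	{ RTE_ETH_RSS_IPV4,             NDIS_HASH_IPV4 },
	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, NDIS_HASH_TCP_IPV4 },
	{ RTE_ETH_RSS_IPV6,             NDIS_HASH_IPV6 },
	{ RTE_ETH_RSS_IPV6_EX,          NDIS_HASH_IPV6_EX },
	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, NDIS_HASH_TCP_IPV6 },
	{ RTE_ETH_RSS_IPV6_TCP_EX,      NDIS_HASH_TCP_IPV6_EX },
};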
diff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c
index 7e91d5984740..c2ff1c999869 100644
--- a/drivers/net/nfb/nfb_ethdev.c
+++ b/drivers/net/nfb/nfb_ethdev.c
@@ -200,7 +200,7 @@ nfb_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -268,26 +268,26 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 
 	status.speed = MAC_SPEED_UNKNOWN;
 
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_status = ETH_LINK_DOWN;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_SPEED_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	if (internals->rxmac[0] != NULL) {
 		nc_rxmac_read_status(internals->rxmac[0], &status);
 
 		switch (status.speed) {
 		case MAC_SPEED_10G:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case MAC_SPEED_40G:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case MAC_SPEED_100G:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -296,7 +296,7 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 		nc_rxmac_read_status(internals->rxmac[i], &status);
 
 		if (status.enabled && status.link_up) {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			break;
 		}
 	}
diff --git a/drivers/net/nfb/nfb_rx.c b/drivers/net/nfb/nfb_rx.c
index d6d4ba9663c6..f19e9834848b 100644
--- a/drivers/net/nfb/nfb_rx.c
+++ b/drivers/net/nfb/nfb_rx.c
@@ -42,7 +42,7 @@ nfb_check_timestamp(struct rte_devargs *devargs)
 	}
 	/* Timestamps are enabled when there is
 	 * key-value pair: enable_timestamp=1
-	 * TODO: timestamp should be enabled with DEV_RX_OFFLOAD_TIMESTAMP
+	 * TODO: timestamp should be enabled with RTE_ETH_RX_OFFLOAD_TIMESTAMP
 	 */
 	if (rte_kvargs_process(kvlist, TIMESTAMP_ARG,
 		timestamp_check_handler, NULL) < 0) {
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 1b4bc33593fb..c526c949a64c 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -160,8 +160,8 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
@@ -170,7 +170,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX mode */
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
 	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
 		PMD_INIT_LOG(INFO, "RSS not supported");
 		return -EINVAL;
@@ -359,20 +359,20 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		hw->mtu = rxmode->max_rx_pkt_len;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
 	/* L2 broadcast */
@@ -384,13 +384,13 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* TX checksum offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -398,7 +398,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	}
 
 	/* RX gather */
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
 	return ctrl;
@@ -486,14 +486,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	int ret;
 
 	static const uint32_t ls_to_ethtool[] = {
-		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
-		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
-		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
-		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
-		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
-		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
+		[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
+		[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
+		[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
+		[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
+		[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
 	};
 
 	PMD_DRV_LOG(DEBUG, "Link update");
@@ -505,15 +505,15 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 			 NFP_NET_CFG_STS_LINK_RATE_MASK;
 
 	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		link.link_speed = ls_to_ethtool[nn_link_status];
 
@@ -702,26 +702,26 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_UDP_CKSUM |
-					     DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-					     DEV_TX_OFFLOAD_UDP_CKSUM |
-					     DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -758,25 +758,25 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	};
 
 	/* All NFP devices support jumbo frames */
-	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-		dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-						   ETH_RSS_NONFRAG_IPV4_TCP |
-						   ETH_RSS_NONFRAG_IPV4_UDP |
-						   ETH_RSS_IPV6 |
-						   ETH_RSS_NONFRAG_IPV6_TCP |
-						   ETH_RSS_NONFRAG_IPV6_UDP;
+		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+						   RTE_ETH_RSS_IPV6 |
+						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 	}
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -847,7 +847,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 	if (link.link_status)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 			    dev->data->port_id, link.link_speed,
-			    link.link_duplex == ETH_LINK_FULL_DUPLEX
+			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 			    ? "full-duplex" : "half-duplex");
 	else
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -964,9 +964,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if ((uint32_t)mtu > RTE_ETHER_MTU)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
@@ -990,12 +990,12 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	new_ctrl = 0;
 
 	/* Enable vlan strip if it is not configured yet */
-	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
 
 	/* Disable vlan strip just if it is configured */
-	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
@@ -1155,22 +1155,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1240,22 +1240,22 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	/* Propagate current RSS hash functions to caller */
 	rss_conf->rss_hf = rss_hf;
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 534a38c14f94..7a6a963bf6cc 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -140,7 +140,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index b697b55865cc..ac960328c7de 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -101,7 +101,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3b5c6615adfa..fc76b84b5b66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -409,7 +409,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	dev->data->dev_link.link_status = link_up;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 		negotiate = true;
 
 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
@@ -418,11 +418,11 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 
 	allowed_speeds = 0;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_1G;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_100M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_10M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
 
 	if (*link_speeds & ~allowed_speeds) {
 		PMD_INIT_LOG(ERR, "Invalid link setting");
@@ -430,14 +430,14 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = hw->mac.default_speeds;
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= NGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= NGBE_LINK_SPEED_100M_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= NGBE_LINK_SPEED_10M_FULL;
 	}
 
@@ -653,8 +653,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_10M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -682,11 +682,11 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			~ETH_LINK_SPEED_AUTONEG);
+			~RTE_ETH_LINK_SPEED_AUTONEG);
 
 	hw->mac.get_link_status = true;
 
@@ -699,8 +699,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -708,27 +708,27 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 
 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case NGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 
 	case NGBE_LINK_SPEED_10M_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		lan_speed = 0;
 		break;
 
 	case NGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		lan_speed = 1;
 		break;
 
 	case NGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		lan_speed = 2;
 		break;
 	}
@@ -912,11 +912,11 @@ ngbe_dev_link_status_print(struct rte_eth_dev *dev)
 
 	rte_eth_linkstatus_get(dev, &link);
 
-	if (link.link_status == ETH_LINK_UP) {
+	if (link.link_status == RTE_ETH_LINK_UP) {
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -956,7 +956,7 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
 		ngbe_dev_link_update(dev, 0);
 
 		/* likely to up */
-		if (link.link_status != ETH_LINK_UP)
+		if (link.link_status != RTE_ETH_LINK_UP)
 			/* handle it 1 sec later, wait it being stable */
 			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
 		/* likely to down */
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 508bafc12a14..789c6b9c4b9a 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -61,16 +61,16 @@ struct pmd_internals {
 	rte_spinlock_t rss_lock;
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
 			RTE_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[40];                /**< 40-byte hash key. */
 };
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
@@ -189,7 +189,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return -EINVAL;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -199,7 +199,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return 0;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -538,7 +538,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	internals->port_id = eth_dev->data->port_id;
 	rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
-	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads =  RTE_ETH_RSS_PROTO_MASK;
 	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
 
 	rte_memcpy(internals->rss_key, default_rss_key, 40);
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 9f4c0503b4d4..947dabdca2c5 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -158,7 +158,7 @@ octeontx_link_status_print(struct rte_eth_dev *eth_dev,
 		octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
 			  (eth_dev->data->port_id),
 			  link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		octeontx_log_info("Port %d: Link Down",
@@ -171,38 +171,38 @@ octeontx_link_status_update(struct octeontx_nic *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	switch (nic->speed) {
 	case OCTEONTX_LINK_SPEED_SGMII:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_XAUI:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RXAUI:
 	case OCTEONTX_LINK_SPEED_10G_R:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case OCTEONTX_LINK_SPEED_QSGMII:
-		link->link_speed = ETH_SPEED_NUM_5G;
+		link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case OCTEONTX_LINK_SPEED_40G_R:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RESERVE1:
 	case OCTEONTX_LINK_SPEED_RESERVE2:
 	default:
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		octeontx_log_err("incorrect link speed %d", nic->speed);
 		break;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -355,20 +355,20 @@ octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= OCCTX_TX_MULTI_SEG_F;
 
 	return flags;
@@ -380,21 +380,21 @@ octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		flags |= OCCTX_RX_MULTI_SEG_F;
 		eth_dev->data->scattered_rx = 1;
 		/* If scatter mode is enabled, TX should also be in multi
 		 * seg mode, else memory leak will occur
 		 */
-		nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	return flags;
@@ -423,18 +423,18 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+		txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		octeontx_log_err("setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -534,13 +534,13 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		octeontx_log_err("Scatter mode is disabled");
 		return -EINVAL;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -553,9 +553,9 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		return rc;
 
 	if (frame_size > OCCTX_L2_MAX_LEN)
-		nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Update max_rx_pkt_len */
 	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -582,7 +582,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
 
 	/* Setup scatter mode if needed by jumbo */
 	if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
-		nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
 		nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
 	}
@@ -854,10 +854,10 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_40G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_40G;
 
 	/* Min/Max MTU supported */
 	dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
@@ -1369,7 +1369,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	nic->ev_ports = 1;
 	nic->print_flag = -1;
 
-	data->dev_link.link_status = ETH_LINK_DOWN;
+	data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	data->dev_started = 0;
 	data->promiscuous = 0;
 	data->all_multicast = 0;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index b73515de37ca..7215039507c3 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -55,24 +55,23 @@
 #define OCCTX_MAX_MTU		(OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
 
 #define OCTEONTX_RX_OFFLOADS		(				   \
-					 DEV_RX_OFFLOAD_CHECKSUM	 | \
-					 DEV_RX_OFFLOAD_SCTP_CKSUM       | \
-					 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_RX_OFFLOAD_SCATTER	         | \
-					 DEV_RX_OFFLOAD_SCATTER		 | \
-					 DEV_RX_OFFLOAD_JUMBO_FRAME	 | \
-					 DEV_RX_OFFLOAD_VLAN_FILTER)
+					 RTE_ETH_RX_OFFLOAD_CHECKSUM	 | \
+					 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER	         | \
+					 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	 | \
+					 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 
 #define OCTEONTX_TX_OFFLOADS		(				   \
-					 DEV_TX_OFFLOAD_MBUF_FAST_FREE	 | \
-					 DEV_TX_OFFLOAD_MT_LOCKFREE	 | \
-					 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_IPV4_CKSUM	 | \
-					 DEV_TX_OFFLOAD_TCP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_SCTP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_MULTI_SEGS)
+					 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	 | \
+					 RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	 | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_TCP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 static inline struct octeontx_nic *
 octeontx_pmd_priv(struct rte_eth_dev *dev)
diff --git a/drivers/net/octeontx/octeontx_ethdev_ops.c b/drivers/net/octeontx/octeontx_ethdev_ops.c
index dbe13ce3826b..6ec2b71b0672 100644
--- a/drivers/net/octeontx/octeontx_ethdev_ops.c
+++ b/drivers/net/octeontx/octeontx_ethdev_ops.c
@@ -43,20 +43,20 @@ octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			rc = octeontx_vlan_hw_filter(nic, true);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
 		} else {
 			rc = octeontx_vlan_hw_filter(nic, false);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
 		}
 	}
@@ -139,7 +139,7 @@ octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
 
 	TAILQ_INIT(&nic->vlan_info.fltr_tbl);
 
-	rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	rc = octeontx_dev_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 	if (rc)
 		octeontx_log_err("Failed to set vlan offload rc=%d", rc);
 
@@ -219,13 +219,13 @@ octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
 		return rc;
 
 	if (conf.rx_pause && conf.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (conf.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (conf.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	/* low_water & high_water values are in Bytes */
 	fc_conf->low_water = conf.low_water;
@@ -272,10 +272,10 @@ octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	conf.high_water = fc_conf->high_water;
 	conf.low_water = fc_conf->low_water;
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 75d4cabf2e7c..ebe503438144 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -21,7 +21,7 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 
 	if (otx2_dev_is_vf(dev) ||
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -33,10 +33,10 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 
 	/* TSO not supported for earlier chip revisions */
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
-		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	return capa;
 }
 
@@ -66,8 +66,8 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	req->npa_func = otx2_npa_pf_func_get();
 	req->sso_func = otx2_sso_pf_func_get();
 	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
@@ -373,7 +373,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 
 	aq->rq.sso_ena = 0;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		aq->rq.ipsech_ena = 1;
 
 	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
@@ -664,7 +664,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev)) {
 		rc = otx2_nix_raw_clock_tsc_conv(dev);
 		if (rc) {
@@ -691,7 +691,7 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -706,29 +706,29 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-			(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+			(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_QINQ_STRIP))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
 		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	if (!dev->ptype_disable)
@@ -767,43 +767,43 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -913,8 +913,8 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 		/* Setting up the rx[tx]_offload_flags due to change
 		 * in rx[tx]_offloads.
@@ -1857,21 +1857,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
 
 	if (otx2_dev_is_Ax(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		otx2_err("Outer IP and SCTP checksum unsupported");
 		goto fail_configure;
 	}
@@ -2244,7 +2244,7 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled in PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev))
 		otx2_nix_timesync_enable(eth_dev);
 	else
@@ -2573,8 +2573,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	rc = otx2_eth_sec_ctx_create(eth_dev);
 	if (rc)
 		goto free_mac_addrs;
-	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 
 	/* Initialize rte-flow */
 	rc = otx2_flow_init(dev);
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 7871e3d30bda..04e43b63c192 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -117,44 +117,44 @@
 #define CQ_TIMER_THRESH_DEFAULT	0xAULL /* ~1usec i.e (0xA * 100nsec) */
 #define CQ_TIMER_THRESH_MAX     255
 
-#define NIX_RSS_L3_L4_SRC_DST  (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
-				| ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define NIX_RSS_L3_L4_SRC_DST  (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY \
+				| RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
-#define NIX_RSS_OFFLOAD		(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
-				 ETH_RSS_TCP | ETH_RSS_SCTP | \
-				 ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
-				 NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | \
-				 ETH_RSS_C_VLAN)
+#define NIX_RSS_OFFLOAD		(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |\
+				 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | \
+				 RTE_ETH_RSS_TUNNEL | RTE_ETH_RSS_L2_PAYLOAD | \
+				 NIX_RSS_L3_L4_SRC_DST | RTE_ETH_RSS_LEVEL_MASK | \
+				 RTE_ETH_RSS_C_VLAN)
 
 #define NIX_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE	| \
-	DEV_TX_OFFLOAD_MT_LOCKFREE	| \
-	DEV_TX_OFFLOAD_VLAN_INSERT	| \
-	DEV_TX_OFFLOAD_QINQ_INSERT	| \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
-	DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_CKSUM	| \
-	DEV_TX_OFFLOAD_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_TSO		| \
-	DEV_TX_OFFLOAD_VXLAN_TNL_TSO    | \
-	DEV_TX_OFFLOAD_GENEVE_TNL_TSO   | \
-	DEV_TX_OFFLOAD_GRE_TNL_TSO	| \
-	DEV_TX_OFFLOAD_MULTI_SEGS	| \
-	DEV_TX_OFFLOAD_IPV4_CKSUM)
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	| \
+	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	| \
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_QINQ_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO		| \
+	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    | \
+	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   | \
+	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	| \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS	| \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define NIX_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM		| \
-	DEV_RX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_RX_OFFLOAD_SCATTER		| \
-	DEV_RX_OFFLOAD_JUMBO_FRAME	| \
-	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_RX_OFFLOAD_VLAN_STRIP	| \
-	DEV_RX_OFFLOAD_VLAN_FILTER	| \
-	DEV_RX_OFFLOAD_QINQ_STRIP	| \
-	DEV_RX_OFFLOAD_TIMESTAMP	| \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM		| \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_RX_OFFLOAD_SCATTER		| \
+	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	| \
+	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER	| \
+	RTE_ETH_RX_OFFLOAD_QINQ_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP	| \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NIX_DEFAULT_RSS_CTX_GROUP  0
 #define NIX_DEFAULT_RSS_MCAM_IDX  -1
diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c b/drivers/net/octeontx2/otx2_ethdev_devargs.c
index 83f905315b38..60bf6c3f5f05 100644
--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -49,12 +49,12 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
-		val = ETH_RSS_RETA_SIZE_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
-		val = ETH_RSS_RETA_SIZE_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
-		val = ETH_RSS_RETA_SIZE_256;
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
+		val = RTE_ETH_RSS_RETA_SIZE_64;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
+		val = RTE_ETH_RSS_RETA_SIZE_128;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
+		val = RTE_ETH_RSS_RETA_SIZE_256;
 	else
 		val = NIX_RSS_RETA_SIZE;
 
diff --git a/drivers/net/octeontx2/otx2_ethdev_ops.c b/drivers/net/octeontx2/otx2_ethdev_ops.c
index 5a4501208e9e..41761085e156 100644
--- a/drivers/net/octeontx2/otx2_ethdev_ops.c
+++ b/drivers/net/octeontx2/otx2_ethdev_ops.c
@@ -29,11 +29,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER))
 		return -EINVAL;
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -59,9 +59,9 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		return rc;
 
 	if (frame_size > NIX_L2_MAX_LEN)
-		dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Update max_rx_pkt_len */
 	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -590,17 +590,17 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	};
 
 	/* Auto negotiation disabled */
-	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
-		devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+		devinfo->speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G;
 
 		/* 50G and 100G to be supported for board version C0
 		 * and above.
 		 */
 		if (!otx2_dev_is_Ax(dev))
-			devinfo->speed_capa |= ETH_LINK_SPEED_50G |
-					       ETH_LINK_SPEED_100G;
+			devinfo->speed_capa |= RTE_ETH_LINK_SPEED_50G |
+					       RTE_ETH_LINK_SPEED_100G;
 	}
 
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
diff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c
index c2a36883cbf2..e1654ef5b284 100644
--- a/drivers/net/octeontx2/otx2_ethdev_sec.c
+++ b/drivers/net/octeontx2/otx2_ethdev_sec.c
@@ -890,8 +890,8 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
 			 !RTE_IS_POWER_OF_2(sa_width));
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return 0;
 
 	if (rte_security_dynfield_register() < 0)
@@ -933,8 +933,8 @@ otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
 	uint16_t port = eth_dev->data->port_id;
 	char name[RTE_MEMZONE_NAMESIZE];
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	lookup_mem_sa_tbl_clear(eth_dev);
diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c
index 6df0732189eb..1d0fe4e950d4 100644
--- a/drivers/net/octeontx2/otx2_flow.c
+++ b/drivers/net/octeontx2/otx2_flow.c
@@ -625,7 +625,7 @@ otx2_flow_create(struct rte_eth_dev *dev,
 		goto err_exit;
 	}
 
-	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rc = flow_update_sec_tt(dev, actions);
 		if (rc != 0) {
 			rte_flow_error_set(error, EIO,
diff --git a/drivers/net/octeontx2/otx2_flow_ctrl.c b/drivers/net/octeontx2/otx2_flow_ctrl.c
index 76bf48100183..071740de86a7 100644
--- a/drivers/net/octeontx2/otx2_flow_ctrl.c
+++ b/drivers/net/octeontx2/otx2_flow_ctrl.c
@@ -54,7 +54,7 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 	int rc;
 
 	if (otx2_dev_is_lbk(dev)) {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		return 0;
 	}
 
@@ -66,13 +66,13 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		goto done;
 
 	if (rsp->rx_pause && rsp->tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rsp->rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (rsp->tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 done:
 	return rc;
@@ -159,10 +159,10 @@ otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -212,11 +212,11 @@ otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (otx2_dev_is_Ax(dev) &&
 	    (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
-	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_conf.mode == RTE_ETH_FC_FULL || fc_conf.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_conf.mode =
-				(fc_conf.mode == RTE_FC_FULL ||
-				fc_conf.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_conf.mode == RTE_ETH_FC_FULL ||
+				fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
@@ -234,7 +234,7 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		return 0;
 
 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -242,10 +242,10 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
diff --git a/drivers/net/octeontx2/otx2_flow_parse.c b/drivers/net/octeontx2/otx2_flow_parse.c
index 63a33142a579..3fe6727f1d2a 100644
--- a/drivers/net/octeontx2/otx2_flow_parse.c
+++ b/drivers/net/octeontx2/otx2_flow_parse.c
@@ -852,7 +852,7 @@ parse_rss_action(struct rte_eth_dev *dev,
 					  attr, "No support of RSS in egress");
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  act, "multi-queue mode is disabled");
@@ -1188,7 +1188,7 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev,
 		 *FLOW_KEY_ALG index. So, till we update the action with
 		 *flow_key_alg index, set the action to drop.
 		 */
-		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 			flow->npc_action = NIX_RX_ACTIONOP_DROP;
 		else
 			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
diff --git a/drivers/net/octeontx2/otx2_link.c b/drivers/net/octeontx2/otx2_link.c
index 81dd6243b977..8f5d0eed92b6 100644
--- a/drivers/net/octeontx2/otx2_link.c
+++ b/drivers/net/octeontx2/otx2_link.c
@@ -41,7 +41,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		otx2_info("Port %d: Link Up - speed %u Mbps - %s",
 			  (int)(eth_dev->data->port_id),
 			  (uint32_t)link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
@@ -92,7 +92,7 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 
 	eth_link.link_status = link->link_up;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	otx2_dev->speed = link->speed;
@@ -111,10 +111,10 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 static int
 lbk_link_update(struct rte_eth_link *link)
 {
-	link->link_status = ETH_LINK_UP;
-	link->link_speed = ETH_SPEED_NUM_100G;
-	link->link_autoneg = ETH_LINK_FIXED;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = RTE_ETH_LINK_UP;
+	link->link_speed = RTE_ETH_SPEED_NUM_100G;
+	link->link_autoneg = RTE_ETH_LINK_FIXED;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	return 0;
 }
 
@@ -131,7 +131,7 @@ cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
 
 	link->link_status = rsp->link_info.link_up;
 	link->link_speed = rsp->link_info.speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (rsp->link_info.full_duplex)
 		link->link_duplex = rsp->link_info.full_duplex;
@@ -233,22 +233,22 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 
 	/* 50G and 100G to be supported for board version C0 and above */
 	if (!otx2_dev_is_Ax(dev)) {
-		if (link_speeds & ETH_LINK_SPEED_100G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 			link_speed = 100000;
-		if (link_speeds & ETH_LINK_SPEED_50G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 			link_speed = 50000;
 	}
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed = 40000;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed = 25000;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed = 20000;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed = 10000;
-	if (link_speeds & ETH_LINK_SPEED_5G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 		link_speed = 5000;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed = 1000;
 
 	return link_speed;
@@ -257,11 +257,11 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 static inline uint8_t
 nix_parse_eth_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-			(link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+			(link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 int
@@ -279,7 +279,7 @@ otx2_apply_link_speed(struct rte_eth_dev *eth_dev)
 	cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
 	if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
 		cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
-		cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+		cfg.an = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		return cgx_change_mode(dev, &cfg);
 	}
diff --git a/drivers/net/octeontx2/otx2_mcast.c b/drivers/net/octeontx2/otx2_mcast.c
index f84aa1bf570c..b9c63ad3bc21 100644
--- a/drivers/net/octeontx2/otx2_mcast.c
+++ b/drivers/net/octeontx2/otx2_mcast.c
@@ -100,7 +100,7 @@ nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
 
 		action = NIX_RX_ACTIONOP_UCAST;
 
-		if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+		if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 			action = NIX_RX_ACTIONOP_RSS;
 			action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 		}
diff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c
index 91e5c0f6bd11..abb213058792 100644
--- a/drivers/net/octeontx2/otx2_ptp.c
+++ b/drivers/net/octeontx2/otx2_ptp.c
@@ -250,7 +250,7 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	/* System time should be already on by default */
 	nix_start_timecounters(eth_dev);
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
@@ -287,7 +287,7 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 	if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
 		return -EINVAL;
 
-	dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
 
diff --git a/drivers/net/octeontx2/otx2_rss.c b/drivers/net/octeontx2/otx2_rss.c
index 7dbe5f69ae65..cbc6d67a7fcf 100644
--- a/drivers/net/octeontx2/otx2_rss.c
+++ b/drivers/net/octeontx2/otx2_rss.c
@@ -178,23 +178,23 @@ rss_get_key(struct otx2_eth_dev *dev, uint8_t *key)
 }
 
 #define RSS_IPV4_ENABLE ( \
-			  ETH_RSS_IPV4 | \
-			  ETH_RSS_FRAG_IPV4 | \
-			  ETH_RSS_NONFRAG_IPV4_UDP | \
-			  ETH_RSS_NONFRAG_IPV4_TCP | \
-			  ETH_RSS_NONFRAG_IPV4_SCTP)
+			  RTE_ETH_RSS_IPV4 | \
+			  RTE_ETH_RSS_FRAG_IPV4 | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE ( \
-			  ETH_RSS_IPV6 | \
-			  ETH_RSS_FRAG_IPV6 | \
-			  ETH_RSS_NONFRAG_IPV6_UDP | \
-			  ETH_RSS_NONFRAG_IPV6_TCP | \
-			  ETH_RSS_NONFRAG_IPV6_SCTP)
+			  RTE_ETH_RSS_IPV6 | \
+			  RTE_ETH_RSS_FRAG_IPV6 | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE ( \
-			     ETH_RSS_IPV6_EX | \
-			     ETH_RSS_IPV6_TCP_EX | \
-			     ETH_RSS_IPV6_UDP_EX)
+			     RTE_ETH_RSS_IPV6_EX | \
+			     RTE_ETH_RSS_IPV6_TCP_EX | \
+			     RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS   3
 
@@ -233,24 +233,24 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->rss_info.nix_rss = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -259,34 +259,34 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -343,7 +343,7 @@ otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 		otx2_nix_rss_set_key(dev, rss_conf->rss_key,
 				     (uint32_t)rss_conf->rss_key_len);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
@@ -390,7 +390,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	int rc;
 
 	/* Skip further configuration if selected mode is not RSS */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS || !qcnt)
 		return 0;
 
 	/* Update default RSS key and cfg */
@@ -408,7 +408,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	}
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
diff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c
index ffeade5952dc..986902287b67 100644
--- a/drivers/net/octeontx2/otx2_rx.c
+++ b/drivers/net/octeontx2/otx2_rx.c
@@ -414,12 +414,12 @@ NIX_RX_FASTPATH_MODES
 	/* For PTP enabled, scalar rx function should be chosen as most of the
 	 * PTP apps are implemented to rx burst 1 pkt.
 	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		pick_rx_func(eth_dev, nix_eth_rx_burst);
 	else
 		pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 
 	/* Copy multi seg version with no offload for tear down sequence */
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index ff299f00b913..c60190074926 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -1070,7 +1070,7 @@ NIX_TX_FASTPATH_MODES
 	else
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 
 	rte_mb();
diff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c
index f5161e17a16d..cce643b7b51d 100644
--- a/drivers/net/octeontx2/otx2_vlan.c
+++ b/drivers/net/octeontx2/otx2_vlan.c
@@ -50,7 +50,7 @@ nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
 
 	action = NIX_RX_ACTIONOP_UCAST;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		action = NIX_RX_ACTIONOP_RSS;
 		action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 	}
@@ -99,7 +99,7 @@ nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
 	 * Take offset from LA since in case of untagged packet,
 	 * lbptr is zero.
 	 */
-	if (type == ETH_VLAN_TYPE_OUTER) {
+	if (type == RTE_ETH_VLAN_TYPE_OUTER) {
 		vtag_action.act.vtag0_def = vtag_index;
 		vtag_action.act.vtag0_lid = NPC_LID_LA;
 		vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
@@ -413,7 +413,7 @@ nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
 		if (vlan->strip_on ||
 		    (vlan->qinq_on && !vlan->qinq_before_def)) {
 			if (eth_dev->data->dev_conf.rxmode.mq_mode ==
-								ETH_MQ_RX_RSS)
+								RTE_ETH_MQ_RX_RSS)
 				vlan->def_rx_mcam_ent.action |=
 							NIX_RX_ACTIONOP_RSS;
 			else
@@ -717,48 +717,48 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
 	rxmode = &eth_dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, true);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, false);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, true, 0);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, false, 0);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
 		if (!dev->vlan_info.qinq_on) {
-			offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, true);
 			if (rc)
 				goto done;
 		}
 	} else {
 		if (dev->vlan_info.qinq_on) {
-			offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, false);
 			if (rc)
 				goto done;
 		}
 	}
 
-	if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-			DEV_RX_OFFLOAD_QINQ_STRIP)) {
+	if (offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) {
 		dev->rx_offloads |= offloads;
 		dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 		otx2_eth_set_rx_function(eth_dev);
@@ -780,7 +780,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
 
 	tpid_cfg->tpid = tpid;
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
 	else
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
@@ -789,7 +789,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	if (rc)
 		return rc;
 
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		dev->vlan_info.outer_vlan_tpid = tpid;
 	else
 		dev->vlan_info.inner_vlan_tpid = tpid;
@@ -864,7 +864,7 @@ otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev,       uint16_t vlan_id, int on)
 		vlan->outer_vlan_idx = 0;
 	}
 
-	rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+	rc = nix_vlan_handle_default_tx_entry(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					      vtag_index, on);
 	if (rc < 0) {
 		printf("Default tx entry failed with rc %d\n", rc);
@@ -986,12 +986,12 @@ otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
 	} else {
 		/* Reinstall all mcam entries now if filter offload is set */
 		if (eth_dev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			nix_vlan_reinstall_vlan_filters(eth_dev);
 	}
 
 	mask =
-	    ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	    RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	rc = otx2_nix_vlan_offload_set(eth_dev, mask);
 	if (rc) {
 		otx2_err("Failed to set vlan offload rc=%d", rc);
diff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c
index a243683d61d3..7bfa6098e230 100644
--- a/drivers/net/octeontx_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeontx_ep/otx_ep_ethdev.c
@@ -33,15 +33,15 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	otx_epvf = OTX_EP_DEV(eth_dev);
 
-	devinfo->speed_capa = ETH_LINK_SPEED_10G;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
 
 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
 	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
-	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
-	devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
-	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+	devinfo->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
+	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
 
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.c b/drivers/net/octeontx_ep/otx_ep_rxtx.c
index a7d433547e36..77593111f141 100644
--- a/drivers/net/octeontx_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeontx_ep/otx_ep_rxtx.c
@@ -563,7 +563,7 @@ otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
@@ -697,7 +697,7 @@ otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)
@@ -954,13 +954,13 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
 	droq_pkt->l4_len = hdr_lens.l4_len;
 
 	if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
 
 	if (droq_pkt->nb_segs > 1 &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index a8774b7a432a..13d18e875444 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -135,10 +135,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
@@ -655,7 +655,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -710,7 +710,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index feec4d10a26e..a74f27bf8158 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -22,15 +22,15 @@ struct pfe_vdev_init_params {
 static struct pfe *g_pfe;
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -613,9 +613,9 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	}
 
 	link.link_status = lstatus;
-	link.link_speed = ETH_LINK_SPEED_1G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_speed = RTE_ETH_LINK_SPEED_1G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	pfe_eth_atomic_write_link_status(dev, &link);
 
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 6667c2d7ab6d..511742c6a1b3 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -65,8 +65,8 @@ typedef u32 offsize_t;      /* In DWORDS !!! */
 struct eth_phy_cfg {
 /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
 	u32 speed;
-#define ETH_SPEED_AUTONEG   0
-#define ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
+#define RTE_ETH_SPEED_AUTONEG   0
+#define RTE_ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
 
 	u32 pause;      /* bitmask */
 #define ETH_PAUSE_NONE		0x0
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 323d46e6ebb2..0af2f919e9d5 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -342,9 +342,9 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
 	}
 
 	use_tx_offload = !!(tx_offloads &
-			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
-			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
-			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+			    (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
 
 	if (use_tx_offload) {
 		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
@@ -1002,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			(void)qede_vlan_stripping(eth_dev, 1);
 		else
 			(void)qede_vlan_stripping(eth_dev, 0);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN filtering kicks in when a VLAN is added */
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			qede_vlan_filter_set(eth_dev, 0, 1);
 		} else {
 			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1022,7 +1022,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 				 * enabled
 				 */
 				eth_dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_VLAN_FILTER;
+						RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			} else {
 				qede_vlan_filter_set(eth_dev, 0, 0);
 			}
@@ -1112,12 +1112,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure TPA parameters */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (qede_enable_tpa(eth_dev, true))
 			return -EINVAL;
 		/* Enable scatter mode for LRO */
 		if (!eth_dev->data->scattered_rx)
-			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	}
 
 	/* Start queues */
@@ -1132,7 +1132,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	 * Also, we would like to retain similar behavior in PF case, so we
 	 * don't do PF/VF specific check here.
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1272,8 +1272,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
@@ -1291,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_NOTICE(edev, false,
 			  "Invalid devargs supplied, requested change will not take effect\n");
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+	if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+	      rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
 		DP_ERR(edev, "Unsupported multi-queue mode\n");
 		return -ENOTSUP;
 	}
@@ -1313,12 +1313,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	}
 
 	/* If jumbo enabled adjust MTU */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		eth_dev->data->mtu =
 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
 			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 
 	if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1327,8 +1327,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = eth_dev->data->mtu;
 
 	/* Enable VLAN offloads by default */
-	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-					     ETH_VLAN_FILTER_MASK);
+	ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK  |
+					     RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1391,35 +1391,35 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_RX_OFFLOAD_UDP_CKSUM	|
-				     DEV_RX_OFFLOAD_TCP_CKSUM	|
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_LRO	|
-				     DEV_RX_OFFLOAD_KEEP_CRC    |
-				     DEV_RX_OFFLOAD_SCATTER	|
-				     DEV_RX_OFFLOAD_JUMBO_FRAME |
-				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP  |
-				     DEV_RX_OFFLOAD_RSS_HASH);
+	dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO	|
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+				     RTE_ETH_RX_OFFLOAD_SCATTER	|
+				     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				     RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
 	 * to both at port and queue levels.
 	 */
-	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
-				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_TX_OFFLOAD_UDP_CKSUM	|
-				     DEV_TX_OFFLOAD_TCP_CKSUM	|
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_MULTI_SEGS  |
-				     DEV_TX_OFFLOAD_TCP_TSO	|
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT	|
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO	|
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	};
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1431,17 +1431,17 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-		speed_cap |= ETH_LINK_SPEED_1G;
+		speed_cap |= RTE_ETH_LINK_SPEED_1G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-		speed_cap |= ETH_LINK_SPEED_10G;
+		speed_cap |= RTE_ETH_LINK_SPEED_10G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-		speed_cap |= ETH_LINK_SPEED_25G;
+		speed_cap |= RTE_ETH_LINK_SPEED_25G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-		speed_cap |= ETH_LINK_SPEED_40G;
+		speed_cap |= RTE_ETH_LINK_SPEED_40G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-		speed_cap |= ETH_LINK_SPEED_50G;
+		speed_cap |= RTE_ETH_LINK_SPEED_50G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-		speed_cap |= ETH_LINK_SPEED_100G;
+		speed_cap |= RTE_ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
 
 	return 0;
@@ -1468,10 +1468,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	/* Link Mode */
 	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
-		link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case QEDE_DUPLEX_FULL:
-		link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case QEDE_DUPLEX_UNKNOWN:
 	default:
@@ -1480,11 +1480,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	/* AN */
 	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+			     RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
 		link.link_speed, link.link_duplex,
@@ -2019,12 +2019,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
-	if (fc_conf->mode == RTE_FC_FULL)
+	if (fc_conf->mode == RTE_ETH_FC_FULL)
 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
 					QED_LINK_PAUSE_RX_ENABLE);
-	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
 	params.link_up = true;
@@ -2048,13 +2048,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
 					 QED_LINK_PAUSE_TX_ENABLE))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2095,14 +2095,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
 	*rss_caps = 0;
-	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -2228,7 +2228,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	uint8_t entry;
 	int rc = 0;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
 		       reta_size);
 		return -EINVAL;
@@ -2289,7 +2289,7 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	uint16_t i, idx, shift;
 	uint8_t entry;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported\n",
 		       reta_size);
 		return -EINVAL;
@@ -2369,9 +2369,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		}
 	}
 	if (frame_size > QEDE_ETH_MAX_LEN)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (!dev->data->dev_started && restart) {
 		qede_dev_start(dev);
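
qede_vlan_offload_set() above receives the RTE_ETH_VLAN_*_MASK bits from the ethdev layer; applications drive it through the matching RTE_ETH_VLAN_*_OFFLOAD flags. A minimal sketch of the application side, assuming a configured port:

    #include <rte_ethdev.h>

    /* Minimal sketch: turn on VLAN stripping at runtime. The ethdev
     * layer diffs the flags and hands the changed RTE_ETH_VLAN_*_MASK
     * bits to the PMD callback shown above. */
    static int
    enable_vlan_strip(uint16_t port_id)
    {
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
            return mask;

        mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
        return rte_eth_dev_set_vlan_offload(port_id, mask);
    }
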
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index c756594bfc4b..ceb47c17d0d6 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -144,7 +144,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
 
 	/* check FDIR modes */
 	switch (fdir->mode) {
@@ -542,7 +542,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -570,7 +570,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 					ECORE_TUNN_CLSS_MAC_VLAN, false);
 
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -622,7 +622,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for VXLAN was already configured\n",
@@ -659,7 +659,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
 		qdev->vxlan.udp_port = udp_port;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for GENEVE was already configured\n",
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 298f4e3e4273..144dfef269f3 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -249,7 +249,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 	/* cache align the mbuf size to simplfy rx_buf_size calculation */
 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)	||
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	||
 	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
 		if (!dev->data->scattered_rx) {
 			DP_INFO(edev, "Forcing scatter-gather mode\n");
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index c9334448c887..15112b83f4f7 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -73,14 +73,14 @@
 #define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
 #define QEDE_ETH_MAX_LEN	(RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
 
-#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
-				 ETH_RSS_NONFRAG_IPV4_TCP	|\
-				 ETH_RSS_NONFRAG_IPV4_UDP	|\
-				 ETH_RSS_IPV6			|\
-				 ETH_RSS_NONFRAG_IPV6_TCP	|\
-				 ETH_RSS_NONFRAG_IPV6_UDP	|\
-				 ETH_RSS_VXLAN			|\
-				 ETH_RSS_GENEVE)
+#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4			|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_UDP	|\
+				 RTE_ETH_RSS_IPV6			|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_UDP	|\
+				 RTE_ETH_RSS_VXLAN			|\
+				 RTE_ETH_RSS_GENEVE)
 
 #define QEDE_RXTX_MAX(qdev) \
 	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
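
QEDE_RSS_OFFLOAD_ALL above is just an OR of the renamed RTE_ETH_RSS_* hash-type bits; an application requests a subset of them through rte_eth_conf. A minimal sketch (queue counts illustrative; in practice drivers reject bits outside dev_info.flow_type_rss_offloads):

    #include <rte_ethdev.h>

    /* Minimal sketch: enable RSS over a few of the renamed hash types. */
    static int
    configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
        struct rte_eth_conf conf = {
            .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
            .rx_adv_conf.rss_conf = {
                .rss_key = NULL,    /* keep the driver default key */
                .rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
                          RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                          RTE_ETH_RSS_NONFRAG_IPV6_TCP,
            },
        };

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }
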
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 1faf38a714cf..8d1ef5fb22bc 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -56,10 +56,10 @@ struct pmd_internals {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
@@ -102,7 +102,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -110,21 +110,21 @@ static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	dev->data->dev_started = 0;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_down(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_up(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -163,8 +163,8 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 274a98e228e4..d93f9d2418b9 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -81,13 +81,13 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 {
 	uint32_t phy_caps = 0;
 
-	if (~speeds & ETH_LINK_SPEED_FIXED) {
+	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		phy_caps |= (1 << EFX_PHY_CAP_AN);
 		/*
 		 * If no speeds are specified in the mask, any supported
 		 * may be negotiated
 		 */
-		if (speeds == ETH_LINK_SPEED_AUTONEG)
+		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 			phy_caps |=
 				(1 << EFX_PHY_CAP_1000FDX) |
 				(1 << EFX_PHY_CAP_10000FDX) |
@@ -96,17 +96,17 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 				(1 << EFX_PHY_CAP_50000FDX) |
 				(1 << EFX_PHY_CAP_100000FDX);
 	}
-	if (speeds & ETH_LINK_SPEED_1G)
+	if (speeds & RTE_ETH_LINK_SPEED_1G)
 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
-	if (speeds & ETH_LINK_SPEED_10G)
+	if (speeds & RTE_ETH_LINK_SPEED_10G)
 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
-	if (speeds & ETH_LINK_SPEED_25G)
+	if (speeds & RTE_ETH_LINK_SPEED_25G)
 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
-	if (speeds & ETH_LINK_SPEED_40G)
+	if (speeds & RTE_ETH_LINK_SPEED_40G)
 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
-	if (speeds & ETH_LINK_SPEED_50G)
+	if (speeds & RTE_ETH_LINK_SPEED_50G)
 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
-	if (speeds & ETH_LINK_SPEED_100G)
+	if (speeds & RTE_ETH_LINK_SPEED_100G)
 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
 
 	return phy_caps;
@@ -337,10 +337,10 @@ sfc_set_fw_subvariant(struct sfc_adapter *sa)
 			tx_offloads |= txq_info->offloads;
 	}
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
 	else
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
@@ -827,7 +827,7 @@ sfc_attach(struct sfc_adapter *sa)
 	sa->priv.shared->tunnel_encaps =
 		encp->enc_tunnel_encapsulations_supported;
 
-	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
 			  encp->enc_tso_v3_enabled;
 		if (!sa->tso)
@@ -836,8 +836,8 @@ sfc_attach(struct sfc_adapter *sa)
 
 	if (sa->tso &&
 	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
-	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
+	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
 		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
 				encp->enc_tso_v3_enabled;
 		if (!sa->tso_encap)
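
sfc_phy_cap_from_link_speeds() above decodes dev_conf.link_speeds, where a cleared RTE_ETH_LINK_SPEED_FIXED bit permits autonegotiation and an all-zero mask (RTE_ETH_LINK_SPEED_AUTONEG) means any supported speed may be negotiated. A minimal sketch of filling that field, assuming the port is (re)configured afterwards with rte_eth_dev_configure():

    #include <rte_ethdev.h>

    /* Minimal sketch: two ways to fill dev_conf.link_speeds. */
    static void
    set_link_speeds(struct rte_eth_conf *conf)
    {
        /* Autonegotiate any supported speed (the all-zero default). */
        conf->link_speeds = RTE_ETH_LINK_SPEED_AUTONEG;

        /* Or: force 25G full-duplex, no autonegotiation. The FIXED
         * bit must accompany the speed flag, here built with the
         * generic helper. */
        conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
                            rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_25G,
                                                  RTE_ETH_LINK_FULL_DUPLEX);
    }
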
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index d4cb96881cd2..ca8774ad0950 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -916,11 +916,11 @@ struct sfc_dp_rx sfc_ef100_rx = {
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_RX_OFFLOAD_SCATTER |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_SCATTER |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.get_dev_info		= sfc_ef100_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
 	.qcreate		= sfc_ef100_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index 522e9a0d3470..7c91ee3fcb53 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -942,16 +942,16 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_MULTI_SEGS |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 991329e86f01..9ea207cca163 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -746,8 +746,8 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
 				  SFC_DP_RX_FEAT_FLOW_MARK,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.queue_offload_capa	= 0,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
 	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 49a7d4fb42fd..9aaabd30eee6 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -819,10 +819,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ed43adb4ca5c..e7da4608bcb0 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -958,9 +958,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+	if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1125,14 +1125,14 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
@@ -1152,11 +1152,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 2db0d000c3ad..8734bca4876f 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -102,19 +102,19 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = sa->sriov.num_vfs;
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->max_rx_queues = sa->rxq_max;
 	dev_info->max_tx_queues = sa->txq_max;
@@ -142,8 +142,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
 				    dev_info->tx_queue_offload_capa;
 
-	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->default_txconf.offloads |= txq_offloads_def;
 
@@ -912,16 +912,16 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	switch (link_fc) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case EFX_FCNTL_RESPOND:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case EFX_FCNTL_GENERATE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	default:
 		sfc_err(sa, "%s: unexpected flow control value %#x",
@@ -952,16 +952,16 @@ sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		fcntl = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		fcntl = EFX_FCNTL_RESPOND;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		fcntl = EFX_FCNTL_GENERATE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
 		break;
 	default:
@@ -1070,7 +1070,7 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	 */
 	if (mtu > RTE_ETHER_MTU) {
 		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	}
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
@@ -1247,7 +1247,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
-		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		qinfo->scattered_rx = 1;
 	}
 	qinfo->nb_desc = rxq_info->entries;
@@ -1472,9 +1472,9 @@ static efx_tunnel_protocol_t
 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
 {
 	switch (rte_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		return EFX_TUNNEL_PROTOCOL_VXLAN;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		return EFX_TUNNEL_PROTOCOL_GENEVE;
 	default:
 		return EFX_TUNNEL_NPROTOS;
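
sfc_flow_ctrl_get()/sfc_flow_ctrl_set() above translate between EFX_FCNTL_* and the renamed RTE_ETH_FC_* modes; the application-facing half is the generic flow-control API. A minimal read-modify-write sketch, assuming a valid port:

    #include <rte_ethdev.h>

    /* Minimal sketch: enable full (Rx + Tx) pause using the renamed
     * RTE_ETH_FC_* modes. */
    static int
    enable_full_pause(uint16_t port_id)
    {
        struct rte_eth_fc_conf fc_conf;
        int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);

        if (ret != 0)
            return ret;

        fc_conf.mode = RTE_ETH_FC_FULL;  /* respond to and generate pause */
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    }
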
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 4f5993a68d23..dc2cdfea13c4 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -390,7 +390,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vlan *spec = NULL;
 	const struct rte_flow_item_vlan *mask = NULL;
 	const struct rte_flow_item_vlan supp_mask = {
-		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
 		.inner_type = RTE_BE16(0xffff),
 	};
 
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index adb2b2cb8175..dea5272a79bc 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -387,7 +387,7 @@ sfc_port_configure(struct sfc_adapter *sa)
 
 	sfc_log_init(sa, "entry");
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		port->pdu = rxmode->max_rx_pkt_len;
 	else
 		port->pdu = EFX_MAC_PDU(dev_data->mtu);
@@ -577,66 +577,66 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
 
 	memset(link_info, 0, sizeof(*link_info));
 	if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
-		link_info->link_status = ETH_LINK_DOWN;
+		link_info->link_status = RTE_ETH_LINK_DOWN;
 	else
-		link_info->link_status = ETH_LINK_UP;
+		link_info->link_status = RTE_ETH_LINK_UP;
 
 	switch (link_mode) {
 	case EFX_LINK_10HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_10FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_100FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_1000HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_1000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_10000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_25000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_25G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_25G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_40000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_40G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_40G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_50000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_50G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_50G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	default:
 		SFC_ASSERT(B_FALSE);
 		/* FALLTHROUGH */
 	case EFX_LINK_UNKNOWN:
 	case EFX_LINK_DOWN:
-		link_info->link_speed  = ETH_SPEED_NUM_NONE;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_NONE;
 		link_info->link_duplex = 0;
 		break;
 	}
 
-	link_info->link_autoneg = ETH_LINK_AUTONEG;
+	link_info->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 int
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 280e8a61f9e0..a83b47a8d111 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -647,9 +647,9 @@ struct sfc_dp_rx sfc_efx_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
 	},
 	.features		= SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
 	.qcreate		= sfc_efx_rx_qcreate,
 	.qdestroy		= sfc_efx_rx_qdestroy,
@@ -930,7 +930,7 @@ sfc_rx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (encp->enc_tunnel_encapsulations_supported == 0)
-		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	return ~no_caps;
 }
@@ -940,7 +940,7 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
 {
 	uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
 
-	caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	caps |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	return caps & sfc_rx_get_offload_mask(sa);
 }
@@ -1141,7 +1141,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 
 	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
 				  encp->enc_rx_prefix_size,
-				  (offloads & DEV_RX_OFFLOAD_SCATTER),
+				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
 				  encp->enc_rx_scatter_max,
 				  &error)) {
 		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
@@ -1167,15 +1167,15 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags |=
-		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
 	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
-	     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
 
-	if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
 
 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
@@ -1205,7 +1205,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	rxq_info->refill_mb_pool = mb_pool;
 
 	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
-	    (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
 	else
 		rxq_info->rxq_flags = 0;
@@ -1301,19 +1301,19 @@ sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
  * Mapping between RTE RSS hash functions and their EFX counterparts.
  */
 static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
-	{ ETH_RSS_NONFRAG_IPV4_TCP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
 	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
 	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
-	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV4, 2TUPLE) },
-	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
-	  ETH_RSS_IPV6_EX,
+	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+	  RTE_ETH_RSS_IPV6_EX,
 	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV6, 2TUPLE) }
 };
@@ -1633,10 +1633,10 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	int rc = 0;
 
 	switch (rxmode->mq_mode) {
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/* No special checks are required */
 		break;
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
 			sfc_err(sa, "RSS is not available");
 			rc = EINVAL;
@@ -1653,16 +1653,16 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	 * so unsupported offloads cannot be added as the result of
 	 * below check.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
-	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 	}
 
 	return rc;
@@ -1808,7 +1808,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	}
 
 configure_rss:
-	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
 			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
 	if (rss->channels > 0) {
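
sfc_rx_check_mode() above compares against RTE_ETH_RX_OFFLOAD_CHECKSUM as a group because that macro is a composite of the IPv4/UDP/TCP checksum bits rather than a single flag. A small sketch of the same group test on the capability side:

    #include <rte_ethdev.h>

    /* Minimal sketch: RTE_ETH_RX_OFFLOAD_CHECKSUM expands to the OR of
     * the IPV4/UDP/TCP checksum bits, so test the whole group. */
    static int
    rx_csum_fully_supported(const struct rte_eth_dev_info *dev_info)
    {
        return (dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM) ==
               RTE_ETH_RX_OFFLOAD_CHECKSUM;
    }
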
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 49b239f4d261..359acc71a47f 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -54,23 +54,23 @@ sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (!encp->enc_hw_tx_insert_vlan_enabled)
-		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (!encp->enc_tunnel_encapsulations_supported)
-		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	if (!sa->tso)
-		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	return ~no_caps;
 }
@@ -114,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -309,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;
 
 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -515,23 +515,23 @@ sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 	if (rc != 0)
 		goto fail_ev_qstart;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_IPV4;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -862,9 +862,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/*
 		 * Here VLAN TCI is expected to be zero in case if no
-		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
@@ -1228,13 +1228,13 @@ struct sfc_dp_tx sfc_efx_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
 	},
 	.features		= 0,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO,
 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
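
sfc_tx_qcheck_conf() above rejects configurations that set only one of the TCP/UDP Tx checksum bits, so an application should keep the pair consistent when building its offload mask. A minimal sketch (the requested set is illustrative):

    #include <rte_ethdev.h>

    /* Minimal sketch: build a Tx offload mask that honours the sfc
     * constraint that TCP and UDP checksum go together or not at all. */
    static uint64_t
    build_tx_offloads(const struct rte_eth_dev_info *dev_info)
    {
        uint64_t wanted = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                          RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
                          RTE_ETH_TX_OFFLOAD_UDP_CKSUM;

        /* Request only what the port reports. */
        wanted &= dev_info->tx_offload_capa;

        /* If only one of the TCP/UDP pair survived, drop both. */
        if (!!(wanted & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) !=
            !!(wanted & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))
            wanted &= ~(RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
                        RTE_ETH_TX_OFFLOAD_UDP_CKSUM);

        return wanted;
    }
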
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b3b55b9035b1..3ef33818a9e0 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -173,7 +173,7 @@ pmd_dev_start(struct rte_eth_dev *dev)
 		return status;
 
 	/* Link UP */
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -184,7 +184,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
 	struct pmd_internals *p = dev->data->dev_private;
 
 	/* Link DOWN */
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	/* Firmware */
 	softnic_pipeline_disable_all(p);
@@ -386,10 +386,10 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
 
 	/* dev->data */
 	dev->data->dev_private = dev_private;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	dev->data->mac_addrs = &eth_addr;
 	dev->data->promiscuous = 1;
 	dev->data->numa_node = params->cpu_id;
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 7416a6b1b816..255444a4181d 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1042,7 +1042,7 @@ static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_dev_data *data = dev->data;
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
 		data->scattered_rx = 1;
 	} else {
@@ -1064,11 +1064,11 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = internals->max_rx_queues;
 	dev_info->max_tx_queues = internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = 0;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->tx_queue_offload_capa = 0;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1204,10 +1204,10 @@ eth_link_update(struct rte_eth_dev *dev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_speed = ETH_SPEED_NUM_100G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_status = ETH_LINK_UP;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	rte_eth_linkstatus_set(dev, &link);
 	return 0;
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index c515de3bf71d..ad5980ef5280 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -70,16 +70,16 @@
 
 #define TAP_IOV_DEFAULT_MAX 1024
 
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER |	\
-			DEV_RX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_RX_OFFLOAD_UDP_CKSUM |	\
-			DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER |	\
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS |	\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_TX_OFFLOAD_UDP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |	\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 static int tap_devices_count;
 
@@ -97,10 +97,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;	/* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -433,7 +433,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		len = readv(process_private->rxq_fds[rxq->queue_id],
 			*rxq->iovecs,
-			1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+			1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
 			     rxq->nb_rx_desc : 1));
 		if (len < (int)sizeof(struct tun_pi))
 			break;
@@ -489,7 +489,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		seg->next = NULL;
 		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
 						      RTE_PTYPE_ALL_MASK);
-		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 			tap_verify_csum(mbuf);
 
 		/* account for the receive frame */
@@ -866,7 +866,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -876,7 +876,7 @@ tap_link_set_up(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
@@ -956,30 +956,30 @@ tap_dev_speed_capa(void)
 	uint32_t speed = pmd_link.link_speed;
 	uint32_t capa = 0;
 
-	if (speed >= ETH_SPEED_NUM_10M)
-		capa |= ETH_LINK_SPEED_10M;
-	if (speed >= ETH_SPEED_NUM_100M)
-		capa |= ETH_LINK_SPEED_100M;
-	if (speed >= ETH_SPEED_NUM_1G)
-		capa |= ETH_LINK_SPEED_1G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_2_5G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_5G;
-	if (speed >= ETH_SPEED_NUM_10G)
-		capa |= ETH_LINK_SPEED_10G;
-	if (speed >= ETH_SPEED_NUM_20G)
-		capa |= ETH_LINK_SPEED_20G;
-	if (speed >= ETH_SPEED_NUM_25G)
-		capa |= ETH_LINK_SPEED_25G;
-	if (speed >= ETH_SPEED_NUM_40G)
-		capa |= ETH_LINK_SPEED_40G;
-	if (speed >= ETH_SPEED_NUM_50G)
-		capa |= ETH_LINK_SPEED_50G;
-	if (speed >= ETH_SPEED_NUM_56G)
-		capa |= ETH_LINK_SPEED_56G;
-	if (speed >= ETH_SPEED_NUM_100G)
-		capa |= ETH_LINK_SPEED_100G;
+	if (speed >= RTE_ETH_SPEED_NUM_10M)
+		capa |= RTE_ETH_LINK_SPEED_10M;
+	if (speed >= RTE_ETH_SPEED_NUM_100M)
+		capa |= RTE_ETH_LINK_SPEED_100M;
+	if (speed >= RTE_ETH_SPEED_NUM_1G)
+		capa |= RTE_ETH_LINK_SPEED_1G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_2_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_10G)
+		capa |= RTE_ETH_LINK_SPEED_10G;
+	if (speed >= RTE_ETH_SPEED_NUM_20G)
+		capa |= RTE_ETH_LINK_SPEED_20G;
+	if (speed >= RTE_ETH_SPEED_NUM_25G)
+		capa |= RTE_ETH_LINK_SPEED_25G;
+	if (speed >= RTE_ETH_SPEED_NUM_40G)
+		capa |= RTE_ETH_LINK_SPEED_40G;
+	if (speed >= RTE_ETH_SPEED_NUM_50G)
+		capa |= RTE_ETH_LINK_SPEED_50G;
+	if (speed >= RTE_ETH_SPEED_NUM_56G)
+		capa |= RTE_ETH_LINK_SPEED_56G;
+	if (speed >= RTE_ETH_SPEED_NUM_100G)
+		capa |= RTE_ETH_LINK_SPEED_100G;
 
 	return capa;
 }
@@ -1196,15 +1196,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
 		if (!(ifr.ifr_flags & IFF_UP) ||
 		    !(ifr.ifr_flags & IFF_RUNNING)) {
-			dev_link->link_status = ETH_LINK_DOWN;
+			dev_link->link_status = RTE_ETH_LINK_DOWN;
 			return 0;
 		}
 	}
 	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
 	dev_link->link_status =
 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-		 ETH_LINK_UP :
-		 ETH_LINK_DOWN);
+		 RTE_ETH_LINK_UP :
+		 RTE_ETH_LINK_DOWN);
 	return 0;
 }
 
@@ -1391,7 +1391,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 	int ret;
 
 	/* initialize GSO context */
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (!pmd->gso_ctx_mp) {
 		/*
 		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1606,9 +1606,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->csum = !!(offloads &
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM));
+			(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
@@ -1765,7 +1765,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	fc_conf->mode = RTE_FC_NONE;
+	fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1773,7 +1773,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	if (fc_conf->mode != RTE_FC_NONE)
+	if (fc_conf->mode != RTE_ETH_FC_NONE)
 		return -ENOTSUP;
 	return 0;
 }
@@ -2267,7 +2267,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
 			}
 		}
 	}
-	pmd_link.link_speed = ETH_SPEED_NUM_10G;
+	pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
 	TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2441,7 +2441,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 		return 0;
 	}
 
-	speed = ETH_SPEED_NUM_10G;
+	speed = RTE_ETH_SPEED_NUM_10G;
 
 	/* use tap%d which causes kernel to choose next available */
 	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
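
Note on the tap conversion above: it is a strict token rename, and it
deliberately preserves the pre-existing quirk in tap_dev_speed_capa()
where the 2.5G capability bit is gated on the 5G speed constant (both
the removed and the added lines compare against the *_SPEED_NUM_5G
value). That is correct for a namespace-only patch; a behavior fix
belongs in a separate change. For illustration only, the same
cumulative mapping could be written table-driven. A hypothetical
sketch, not part of this patch (and note it uses the 2.5G constant for
the 2.5G row, so it is not behavior-equivalent):

	#include <stddef.h>
	#include <stdint.h>
	#include <rte_common.h>
	#include <rte_ethdev.h>

	/* Hypothetical speed -> cumulative capability mapping. */
	static const struct {
		uint32_t num;   /* RTE_ETH_SPEED_NUM_* threshold */
		uint32_t capa;  /* RTE_ETH_LINK_SPEED_* capability bit */
	} speed_map[] = {
		{ RTE_ETH_SPEED_NUM_10M,  RTE_ETH_LINK_SPEED_10M },
		{ RTE_ETH_SPEED_NUM_100M, RTE_ETH_LINK_SPEED_100M },
		{ RTE_ETH_SPEED_NUM_1G,   RTE_ETH_LINK_SPEED_1G },
		{ RTE_ETH_SPEED_NUM_2_5G, RTE_ETH_LINK_SPEED_2_5G },
		{ RTE_ETH_SPEED_NUM_5G,   RTE_ETH_LINK_SPEED_5G },
		{ RTE_ETH_SPEED_NUM_10G,  RTE_ETH_LINK_SPEED_10G },
		{ RTE_ETH_SPEED_NUM_20G,  RTE_ETH_LINK_SPEED_20G },
		{ RTE_ETH_SPEED_NUM_25G,  RTE_ETH_LINK_SPEED_25G },
		{ RTE_ETH_SPEED_NUM_40G,  RTE_ETH_LINK_SPEED_40G },
		{ RTE_ETH_SPEED_NUM_50G,  RTE_ETH_LINK_SPEED_50G },
		{ RTE_ETH_SPEED_NUM_56G,  RTE_ETH_LINK_SPEED_56G },
		{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_LINK_SPEED_100G },
	};

	static uint32_t
	speed_to_capa(uint32_t speed)
	{
		uint32_t capa = 0;
		size_t i;

		for (i = 0; i < RTE_DIM(speed_map); i++)
			if (speed >= speed_map[i].num)
				capa |= speed_map[i].capa;
		return capa;
	}
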
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 176e7180bdaa..48c151cf6b68 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -13,7 +13,7 @@
 #define TAP_RSS_HASH_KEY_SIZE 40
 
 /* Supported RSS */
-#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define TAP_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))
 
 /* hashed fields for RSS */
 enum hash_field {
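
Note: TAP_RSS_HF_MASK keeps its inverted-mask convention after the
rename: a set bit means the hash type is not supported. A minimal
usage sketch (hypothetical helper, illustration only):

	#include <stdint.h>
	#include <rte_ethdev.h>
	#include "tap_rss.h"

	/* Accept an RSS request only if every requested hash type is one
	 * of RTE_ETH_RSS_IP/UDP/TCP, i.e. no bit survives the mask. */
	static int
	tap_rss_hf_is_supported(uint64_t rss_hf)
	{
		return (rss_hf & TAP_RSS_HF_MASK) == 0;
	}
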
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index fc1844ddfce1..26861e4103d9 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -61,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	if (nic->duplex == NICVF_HALF_DUPLEX)
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -134,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
 			nicvf_link_status_update(nic, &link);
-			if (link.link_status == ETH_LINK_UP)
+			if (link.link_status == RTE_ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
@@ -177,9 +177,9 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 
 	if (frame_size > NIC_HW_L2_MAX_LEN)
-		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
 		return -EINVAL;
@@ -404,35 +404,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
 	uint64_t nic_rss = 0;
 
-	if (ethdev_rss & ETH_RSS_IPV4)
+	if (ethdev_rss & RTE_ETH_RSS_IPV4)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_IPV6)
+	if (ethdev_rss & RTE_ETH_RSS_IPV6)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-		if (ethdev_rss & ETH_RSS_VXLAN)
+		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 			nic_rss |= RSS_TUN_VXLAN_ENA;
 
-		if (ethdev_rss & ETH_RSS_GENEVE)
+		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 			nic_rss |= RSS_TUN_GENEVE_ENA;
 
-		if (ethdev_rss & ETH_RSS_NVGRE)
+		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 			nic_rss |= RSS_TUN_NVGRE_ENA;
 	}
 
@@ -445,28 +445,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
 	uint64_t ethdev_rss = 0;
 
 	if (nic_rss & RSS_IP_ENA)
-		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-				ETH_RSS_NONFRAG_IPV6_TCP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-				ETH_RSS_NONFRAG_IPV6_UDP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-		ethdev_rss |= ETH_RSS_PORT;
+		ethdev_rss |= RTE_ETH_RSS_PORT;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
 		if (nic_rss & RSS_TUN_VXLAN_ENA)
-			ethdev_rss |= ETH_RSS_VXLAN;
+			ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
 		if (nic_rss & RSS_TUN_GENEVE_ENA)
-			ethdev_rss |= ETH_RSS_GENEVE;
+			ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
 		if (nic_rss & RSS_TUN_NVGRE_ENA)
-			ethdev_rss |= ETH_RSS_NVGRE;
+			ethdev_rss |= RTE_ETH_RSS_NVGRE;
 	}
 	return ethdev_rss;
 }
@@ -821,9 +821,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
 		    dev->data->nb_rx_queues,
 		    dev->data->dev_conf.lpbk_mode, rsshf);
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		ret = nicvf_rss_term(nic);
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
 	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -884,7 +884,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -1007,7 +1007,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->offloads = offloads;
 
-	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1397,11 +1397,11 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+				 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
@@ -1430,10 +1430,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
-			DEV_TX_OFFLOAD_UDP_CKSUM          |
-			DEV_TX_OFFLOAD_TCP_CKSUM,
+		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM          |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 	};
 
 	return 0;
@@ -1597,8 +1597,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1727,11 +1727,11 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU based on max_rx_pkt_len or default */
-	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
+	mtu = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME ?
 		dev->data->dev_conf.rxmode.max_rx_pkt_len
 			-  RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
 
@@ -1914,8 +1914,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!rte_eal_has_hugepages()) {
 		PMD_INIT_LOG(INFO, "Huge page is not configured");
@@ -1927,8 +1927,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
@@ -1938,7 +1938,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -1973,7 +1973,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic->offload_cksum = 1;
 
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -2050,8 +2050,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			nicvf_vlan_hw_strip(nic, true);
 		else
 			nicvf_vlan_hw_strip(nic, false);
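
The nicvf hunks also carry over the implicit-offload rule unchanged:
when the application selects an RSS multi-queue mode, the driver forces
the RSS hash delivery offload on. Seen from the application side, a
minimal sketch (assuming a valid, stopped port):

	#include <stdint.h>
	#include <rte_ethdev.h>

	static int
	configure_rss_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_conf conf = {
			.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
			.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP,
			.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
		};

		/* nicvf_dev_configure() ORs RTE_ETH_RX_OFFLOAD_RSS_HASH
		 * into rxmode->offloads for RSS mq modes, as shown above. */
		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	}
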
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index b8dd905d0bd6..c1876bb9e1b7 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -16,33 +16,33 @@
 #define NICVF_UNKNOWN_DUPLEX		0xff
 
 #define NICVF_RSS_OFFLOAD_PASS1 ( \
-	ETH_RSS_PORT | \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_PORT | \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define NICVF_RSS_OFFLOAD_TUNNEL ( \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
 
 #define NICVF_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
-	DEV_TX_OFFLOAD_UDP_CKSUM        | \
-	DEV_TX_OFFLOAD_TCP_CKSUM        | \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define NICVF_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM    | \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_JUMBO_FRAME | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM    | \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
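
The capability masks above are what nicvf_dev_info_get() reports, so
applications can intersect their requested offloads with them instead
of hard-coding bits. A sketch (assuming a valid port_id):

	#include <stdint.h>
	#include <rte_ethdev.h>

	/* Drop any requested Tx offload the port does not advertise. */
	static uint64_t
	clamp_tx_offloads(uint16_t port_id, uint64_t wanted)
	{
		struct rte_eth_dev_info dev_info;

		if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
			return 0;
		return wanted & dev_info.tx_offload_capa;
	}
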
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 006399468841..a42b7bfe55ae 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -997,7 +997,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 			!(rxcfg & TXGBE_RXCFG_VLAN);
 		rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1032,7 +1032,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (vlan_ext) {
 			wr32m(hw, TXGBE_VLANCTL,
 				TXGBE_VLANCTL_TPID_MASK,
@@ -1052,7 +1052,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				TXGBE_TAGTPID_LSB(tpid));
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (vlan_ext) {
 			/* Only the high 16-bits is valid */
 			wr32m(hw, TXGBE_EXTAG,
@@ -1137,10 +1137,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -1239,7 +1239,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			txgbe_vlan_strip_queue_set(dev, i, 1);
 		else
 			txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1253,17 +1253,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct txgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -1274,25 +1274,25 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		txgbe_vlan_hw_strip_config(dev);
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			txgbe_vlan_hw_filter_enable(dev);
 		else
 			txgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			txgbe_vlan_hw_extend_enable(dev);
 		else
 			txgbe_vlan_hw_extend_disable(dev);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			txgbe_qinq_hw_strip_enable(dev);
 		else
 			txgbe_qinq_hw_strip_disable(dev);
@@ -1330,10 +1330,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -1356,18 +1356,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1377,13 +1377,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode =
-				ETH_MQ_RX_VMDQ_ONLY;
+				RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -1392,13 +1392,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
 			dev->data->dev_conf.txmode.mq_mode =
-				ETH_MQ_TX_VMDQ_ONLY;
+				RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -1413,13 +1413,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdb+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1428,15 +1428,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1445,39 +1445,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -1494,8 +1494,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = txgbe_check_mq_mode(dev);
@@ -1637,7 +1637,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	 *    - half duplex (checked afterwards for valid speeds)
 	 *    - fixed speed: TODO implement
 	 */
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -1704,15 +1704,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		txgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1773,8 +1773,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	if (err)
 		goto error;
 
-	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
 	if (*link_speeds & ~allowed_speeds) {
@@ -1783,20 +1783,20 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
 	}
 
@@ -2611,7 +2611,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2644,11 +2644,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_desc_lim = tx_desc_lim;
 
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -2705,10 +2705,10 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	hw->mac.get_link_status = true;
 
@@ -2722,8 +2722,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -2742,34 +2742,34 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	}
 
 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case TXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case TXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -2994,7 +2994,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3225,13 +3225,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3363,10 +3363,10 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
@@ -3404,10 +3404,10 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
@@ -3593,12 +3593,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
 		}
@@ -3622,15 +3622,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= TXGBE_POOLETHCTL_UTA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= TXGBE_POOLETHCTL_MCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= TXGBE_POOLETHCTL_UCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= TXGBE_POOLETHCTL_BCA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= TXGBE_POOLETHCTL_MCP;
 
 	return new_val;
@@ -4281,15 +4281,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = TXGBE_INCVAL_100;
 		shift = TXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = TXGBE_INCVAL_1GB;
 		shift = TXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = TXGBE_INCVAL_10GB;
 		shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4645,7 +4645,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -4656,7 +4656,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -4680,9 +4680,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4695,7 +4695,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4925,7 +4925,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -4956,7 +4956,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -4996,7 +4996,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5004,7 +5004,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5012,7 +5012,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5020,7 +5020,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5052,7 +5052,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5062,7 +5062,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5072,7 +5072,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5082,7 +5082,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 3021933965c8..75a9e2580e27 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -56,15 +56,15 @@
 #define TXGBE_5TUPLE_MIN_PRI            1
 
 #define TXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
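
TXGBE_RSS_OFFLOAD_ALL above pairs with the 128-entry RETA the driver
reports (RTE_ETH_RSS_RETA_SIZE_128). A sketch of programming a uniform
redirection table, assuming RTE_ETH_RETA_GROUP_SIZE is the renamed
RTE_RETA_GROUP_SIZE and port_id is valid:

	#include <stdint.h>
	#include <string.h>
	#include <rte_ethdev.h>

	/* Spread the 128 RETA entries round-robin over nb_rxq queues. */
	static int
	spread_reta(uint16_t port_id, uint16_t nb_rxq)
	{
		struct rte_eth_rss_reta_entry64
			reta[RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta, 0, sizeof(reta));
		for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
			reta[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
			reta[i / RTE_ETH_RETA_GROUP_SIZE].reta
				[i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_rxq;
		}
		return rte_eth_dev_rss_reta_update(port_id, reta,
				RTE_ETH_RSS_RETA_SIZE_128);
	}
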
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 18ed94bd277b..05773cb20786 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -491,14 +491,14 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -579,22 +579,22 @@ txgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -652,8 +652,8 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
 	txgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -896,10 +896,10 @@ txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			txgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
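
The VF path above only honors the strip bit of the VLAN mask. From the
application, the strip offload is toggled through the generic API; a
sketch assuming the RTE_ETH_VLAN_STRIP_OFFLOAD alias is renamed
alongside the *_MASK values:

	#include <stdint.h>
	#include <rte_ethdev.h>

	/* Enable or disable Rx VLAN stripping on all queues. */
	static int
	set_vlan_strip(uint16_t port_id, int on)
	{
		int mask = rte_eth_dev_get_vlan_offload(port_id);

		if (mask < 0)
			return mask;
		if (on)
			mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		else
			mask &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
		return rte_eth_dev_set_vlan_offload(port_id, mask);
	}
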
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 8abb86228608..e303d87176ed 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -102,22 +102,22 @@ txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf,
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
 		     uint32_t *fdirctrl, uint32_t *flex)
 {
 	*fdirctrl = 0;
 	*flex = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
 		break;
@@ -521,15 +521,15 @@ txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
@@ -564,15 +564,15 @@ txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_signature_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index eae400b14176..6d7fd1842843 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1215,7 +1215,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index ccd747973ba2..445733f3ba46 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -372,7 +372,7 @@ txgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -380,7 +380,7 @@ txgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -611,11 +611,11 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -634,7 +634,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= TXGBE_SECRXCTL_CRCSTRIP;
 	wr32(hw, TXGBE_SECRXCTL, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
 		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
 		if (reg != 0) {
@@ -642,7 +642,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
 		reg = rd32(hw, TXGBE_SECTXCTL);
 		if (reg != TXGBE_SECTXCTL_STFWD) {
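
The ipsec hunks keep the driver's sanity rules intact: inline security
cannot coexist with LRO or with keeping the CRC. An application-side
sketch that requests inline IPsec consistently with those checks:

	#include <rte_ethdev.h>

	/* Turn on inline security and clear the conflicting Rx offloads. */
	static void
	request_inline_ipsec(struct rte_eth_conf *conf)
	{
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
		conf->rxmode.offloads &= ~(RTE_ETH_RX_OFFLOAD_TCP_LRO |
					   RTE_ETH_RX_OFFLOAD_KEEP_CRC);
	}
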
diff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c
index 494d779a3c9d..44f6f103edd2 100644
--- a/drivers/net/txgbe/txgbe_pf.c
+++ b/drivers/net/txgbe/txgbe_pf.c
@@ -103,15 +103,15 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct txgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -258,13 +258,13 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	}
@@ -613,29 +613,29 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &eth_dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = TXGBE_DEV_HW(eth_dev);
 		vmvir = rd32(hw, TXGBE_POOLTAG(vf));
 		vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
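
The pool sizing in txgbe_pf_host_init() follows a fixed queue budget:
64 pools get 2 queues each, 32 get 4, and 16 get 8, which all multiply
out to 128. A trivial equivalent, assuming that 128-queue budget:

	#include <stdint.h>

	/* 64 -> 2, 32 -> 4, 16 -> 8 queues per pool. */
	static uint16_t
	queues_per_pool(uint16_t active_pools)
	{
		return 128 / active_pools;
	}
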
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 1a261287d1bd..c302d49af728 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1939,7 +1939,7 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
-	return DEV_RX_OFFLOAD_VLAN_STRIP;
+	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 }
 
 uint64_t
@@ -1949,35 +1949,35 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_JUMBO_FRAME |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_RSS_HASH |
-		   DEV_RX_OFFLOAD_SCATTER;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		   RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	if (!txgbe_is_vf(dev))
-		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-			     DEV_RX_OFFLOAD_QINQ_STRIP |
-			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+		offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 
 	/*
 	 * RSC is only supported by PF devices in a non-SR-IOV
 	 * mode.
 	 */
 	if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == txgbe_mac_raptor)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
-	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -2202,32 +2202,32 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_UDP_TSO	   |
-		DEV_TX_OFFLOAD_UDP_TNL_TSO	|
-		DEV_TX_OFFLOAD_IP_TNL_TSO	|
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO	|
-		DEV_TX_OFFLOAD_GRE_TNL_TSO	|
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO	|
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO	|
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_UDP_TSO	   |
+		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (!txgbe_is_vf(dev))
-		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2329,7 +2329,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/* Modification to set tail pointer for virtual function
@@ -2579,7 +2579,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2880,20 +2880,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2910,20 +2910,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		mrqc &= ~TXGBE_RACTL_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_RACTL_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_RACTL_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_RACTL_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2964,39 +2964,39 @@ txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
 			rss_hf = 0;
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		if (mrqc & TXGBE_RACTL_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_RACTL_RSSENA))
 			rss_hf = 0;
 	}
@@ -3026,7 +3026,7 @@ txgbe_rss_configure(struct rte_eth_dev *dev)
 	 */
 	if (adapter->rss_reta_updated == 0) {
 		reta = 0;
-		for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 			if (j == dev->data->nb_rx_queues)
 				j = 0;
 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
@@ -3063,12 +3063,12 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		txgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * split rx buffer up into sections, each for 1 traffic class
@@ -3083,7 +3083,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
 
 		rxpbsize &= (~(0x3FF << 10));
@@ -3091,7 +3091,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 
-	if (num_pools == ETH_16_POOLS) {
+	if (num_pools == RTE_ETH_16_POOLS) {
 		mrqc = TXGBE_PORTCTL_NUMTC_8;
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 	} else {
@@ -3110,7 +3110,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	wr32(hw, TXGBE_POOLCTL, vt_ctl);
 
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3131,7 +3131,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_POOLRXENA(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_ETHADDRIDX, 0);
 	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
@@ -3201,7 +3201,7 @@ txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	/*PF VF Transmit Enable*/
 	wr32(hw, TXGBE_POOLTXENA(0),
 		vmdq_tx_conf->nb_queue_pools ==
-				ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+				RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	txgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3217,12 +3217,12 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3232,7 +3232,7 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3250,12 +3250,12 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3265,7 +3265,7 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3292,7 +3292,7 @@ txgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3319,7 +3319,7 @@ txgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3455,7 +3455,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/*
@@ -3466,8 +3466,8 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/*Configure general VMDQ and DCB RX parameters*/
 		txgbe_vmdq_dcb_configure(dev);
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -3480,7 +3480,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -3491,7 +3491,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -3507,15 +3507,15 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
-			if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -3556,7 +3556,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			wr32(hw, TXGBE_PBRXSIZE(i), 0);
 	}
 	if (config_dcb_tx) {
@@ -3572,7 +3572,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			wr32(hw, TXGBE_PBTXSIZE(i), 0);
 			wr32(hw, TXGBE_PBTXDMATH(i), 0);
 		}
@@ -3614,7 +3614,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/* If the TC count is 8,
@@ -3628,7 +3628,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = txgbe_dcb_pfc_enabled;
 		}
 		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -3699,12 +3699,12 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -3760,7 +3760,7 @@ txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* pool enabling for receive - 64 */
 	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
 
 	/*
@@ -3884,11 +3884,11 @@ txgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
@@ -3911,15 +3911,15 @@ txgbe_config_vf_default(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	default:
@@ -3942,21 +3942,21 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			txgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable RSS mode. */
 			txgbe_rss_disable(dev);
@@ -3967,18 +3967,18 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4008,7 +4008,7 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			txgbe_vmdq_tx_hw_configure(hw);
 		else
 			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
@@ -4018,13 +4018,13 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_64;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_32;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_16;
 			break;
 		default:
@@ -4087,10 +4087,10 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4098,22 +4098,22 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
 				    "is disabled");
 		return -EINVAL;
 	}
 
 	rfctl = rd32(hw, TXGBE_PSRCTL);
-	if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~TXGBE_PSRCTL_RSCDIA;
 	else
 		rfctl |= TXGBE_PSRCTL_RSCDIA;
 	wr32(hw, TXGBE_PSRCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set PSRCTL.RSCACK bit */
@@ -4253,7 +4253,7 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 	}
 #endif
 }
@@ -4296,7 +4296,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
 	else
 		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4305,7 +4305,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
 			TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
 	} else {
@@ -4329,7 +4329,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4339,7 +4339,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -4376,11 +4376,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * TXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4395,7 +4395,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = rd32(hw, TXGBE_PSRCTL);
 	rxcsum |= TXGBE_PSRCTL_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= TXGBE_PSRCTL_L4CSUM;
 	else
 		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
@@ -4404,7 +4404,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	if (hw->mac.type == txgbe_mac_raptor) {
 		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4527,8 +4527,8 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 		txgbe_setup_loopback_link_raptor(hw);
 
 #ifdef RTE_LIB_SECURITY
-	if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
-	    (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
+	    (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = txgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -4836,7 +4836,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Set PSR type for VF RSS according to max Rx queue */
 	psrtype = TXGBE_VFPLCFG_PSRL4HDR |
@@ -4888,7 +4888,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		 */
 		wr32(hw, TXGBE_RXCFG(i), srrctl);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* Add dual VLAN tag length to support dual VLAN */
 		    (rxmode->max_rx_pkt_len +
 				2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -4897,8 +4897,8 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/*
@@ -5069,7 +5069,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 		if (j == conf->conf.queue_num)
 			j = 0;
 		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
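
For reference, the rss_hf bits assembled in these hunks travel unmodified
through the ethdev API, so applications see the renamed flags directly.
A minimal sketch of reading them back (hypothetical helper, assuming an
already-configured port):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_rss_types(uint16_t port_id)
    {
        struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };

        /* Fills rss_conf.rss_hf with RTE_ETH_RSS_* bits. */
        if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) != 0)
            return;

        if (rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
            printf("IPv4/TCP hashing enabled\n");
        if (rss_conf.rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX))
            printf("IPv6 hashing enabled\n");
    }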
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b96f58a3f848..27d4c842c0e7 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -309,7 +309,7 @@ struct txgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -392,7 +392,7 @@ struct txgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t            offloads; /* Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 3abe3959eb1a..3171be73d05d 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -118,14 +118,14 @@ txgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -364,10 +364,10 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -381,7 +381,7 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
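
The pool/TC arithmetic in txgbe_tc_nb_get() follows the fixed VMDq+DCB
split: 16 pools leave room for 8 traffic classes per pool, 32 pools for 4.
A sketch of that rule with the renamed enums (hypothetical helper):

    #include <rte_ethdev.h>

    /* 16 pools -> 8 TCs each, otherwise (32 pools) -> 4 TCs each. */
    static uint8_t
    tc_count_for_pools(enum rte_eth_nb_pools pools)
    {
        return pools == RTE_ETH_16_POOLS ? RTE_ETH_8_TCS : RTE_ETH_4_TCS;
    }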
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a202931e9aed..778460aab5e1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -125,8 +125,8 @@ static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static struct rte_eth_link pmd_link = {
 		.link_speed = 10000,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN
 };
 
 struct rte_vhost_vring_state {
@@ -823,7 +823,7 @@ new_device(int vid)
 
 	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);
@@ -858,7 +858,7 @@ destroy_device(int vid)
 	rte_atomic32_set(&internal->dev_attached, 0);
 	update_queuing_status(eth_dev);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1124,7 +1124,7 @@ eth_dev_configure(struct rte_eth_dev *dev)
 	if (vhost_driver_setup(dev) < 0)
 		return -1;
 
-	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -1273,9 +1273,9 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = internal->max_queues;
 	dev_info->min_rx_bufsize = 0;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
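
The vhost PMD only flips link_status between the two renamed constants,
so consumers keep polling the same way. A minimal sketch of such a poll
loop (hypothetical helper, assuming rte_delay_ms() granularity is
acceptable):

    #include <rte_ethdev.h>
    #include <rte_cycles.h>

    static int
    wait_link_up(uint16_t port_id, int tries)
    {
        struct rte_eth_link link;

        while (tries-- > 0) {
            if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
                link.link_status == RTE_ETH_LINK_UP)
                return 0;
            rte_delay_ms(100);  /* retry interval */
        }
        return -1;
    }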
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index e58085a2c95a..00bbbb2b3537 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -703,7 +703,7 @@ int
 virtio_dev_close(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1763,7 +1763,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
-	if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+	if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
 		if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
 			config = &local_config;
 			virtio_read_dev_config(hw,
@@ -1777,7 +1777,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		}
 	}
 	if (hw->duplex == DUPLEX_UNKNOWN)
-		hw->duplex = ETH_LINK_FULL_DUPLEX;
+		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
 		hw->speed, hw->duplex);
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
@@ -1876,7 +1876,7 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	int vectorized = 0;
 	int ret;
 
@@ -1948,22 +1948,22 @@ static uint32_t
 virtio_dev_speed_capa_get(uint32_t speed)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
@@ -2079,14 +2079,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "configure");
 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Rx multi queue mode %d",
 			rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Tx multi queue mode %d",
 			txmode->mq_mode);
@@ -2104,20 +2104,20 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
@@ -2129,15 +2129,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
-	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
 		PMD_DRV_LOG(ERR,
 			"rx checksum not available on this host");
 		return -ENOTSUP;
 	}
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
 		PMD_DRV_LOG(ERR,
@@ -2149,12 +2149,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
 		virtio_dev_cq_start(dev);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		hw->vlan_strip = 1;
 
-	hw->rx_ol_scatter = (rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 		PMD_DRV_LOG(ERR,
 			    "vlan filtering not available on this host");
@@ -2207,7 +2207,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 				PMD_DRV_LOG(INFO,
 					"disabled packed ring vectorized rx for TCP_LRO enabled");
 				hw->use_vec_rx = 0;
@@ -2234,10 +2234,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_LRO |
-					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
+					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
 				PMD_DRV_LOG(INFO,
 					"disabled split ring vectorized rx for offloading enabled");
 				hw->use_vec_rx = 0;
@@ -2401,7 +2401,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct rte_eth_link link;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 	dev->data->dev_started = 0;
@@ -2440,28 +2440,28 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 	memset(&link, 0, sizeof(link));
 	link.link_duplex = hw->duplex;
 	link.link_speed  = hw->speed;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (!hw->started) {
-		link.link_status = ETH_LINK_DOWN;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
 		virtio_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
 				&status, sizeof(status));
 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
-			link.link_status = ETH_LINK_DOWN;
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			PMD_INIT_LOG(DEBUG, "Port %d is down",
 				     dev->data->port_id);
 		} else {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			PMD_INIT_LOG(DEBUG, "Port %d is up",
 				     dev->data->port_id);
 		}
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2474,8 +2474,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct virtio_hw *hw = dev->data->dev_private;
 	uint64_t offloads = rxmode->offloads;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 
 			PMD_DRV_LOG(NOTICE,
@@ -2485,8 +2485,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK)
-		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
+		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -2508,33 +2508,33 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = hw->max_mtu;
 
 	host_features = VIRTIO_OPS(hw)->get_features(hw);
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 	}
 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 	}
 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	return 0;
 }
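
virtio_dev_speed_capa_get() above is a plain speed-number to
capability-bit table. For full-duplex speeds, ethdev already exports the
same translation via rte_eth_speed_bitflag(), so a roughly equivalent
sketch (assuming full duplex) is:

    #include <rte_ethdev.h>

    /* Map RTE_ETH_SPEED_NUM_* to its RTE_ETH_LINK_SPEED_* capability bit. */
    static uint32_t
    speed_to_capa(uint32_t speed)
    {
        return rte_eth_speed_bitflag(speed, RTE_ETH_LINK_FULL_DUPLEX);
    }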
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 1a3291273a11..825a6adfc2b1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -41,21 +41,21 @@
 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
 
 #define VMXNET3_TX_OFFLOAD_CAP		\
-	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
-	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_TX_OFFLOAD_TCP_TSO |	\
-	 DEV_TX_OFFLOAD_MULTI_SEGS)
+	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
+	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define VMXNET3_RX_OFFLOAD_CAP		\
-	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
-	 DEV_RX_OFFLOAD_VLAN_FILTER |   \
-	 DEV_RX_OFFLOAD_SCATTER |	\
-	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_LRO |	\
-	 DEV_RX_OFFLOAD_JUMBO_FRAME |   \
-	 DEV_RX_OFFLOAD_RSS_HASH)
+	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |	\
+	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
+	 RTE_ETH_RX_OFFLOAD_SCATTER |	\
+	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_LRO |	\
+	 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |   \
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 int vmxnet3_segs_dynfield_offset = -1;
 
@@ -399,9 +399,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* set the initial link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(eth_dev, &link);
 
 	return 0;
@@ -487,8 +487,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
@@ -548,7 +548,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	hw->queueDescPA = mz->iova;
 	hw->queue_desc_len = (uint16_t)size;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Allocate memory structure for UPT1_RSSConf and configure */
 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
 				      "rss_conf", rte_socket_id(),
@@ -844,15 +844,15 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
 		devRead->misc.maxNumRxSG = 0;
 	}
 
-	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		ret = vmxnet3_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS)
 			return ret;
@@ -864,7 +864,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	}
 
 	ret = vmxnet3_dev_vlan_offload_set(dev,
-			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -931,7 +931,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 	}
 
 	if (VMXNET3_VERSION_GE_4(hw) &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Check for additional RSS */
 		ret = vmxnet3_v4_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS) {
@@ -1040,9 +1040,9 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(dev, &link);
 
 	hw->adapter_stopped = 1;
@@ -1372,7 +1372,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
 	dev_info->min_mtu = VMXNET3_MIN_MTU;
 	dev_info->max_mtu = VMXNET3_MAX_MTU;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -1454,10 +1454,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (ret & 0x1)
-		link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+		link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1510,7 +1510,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1580,8 +1580,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1590,8 +1590,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 				       VMXNET3_CMD_UPDATE_FEATURE);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
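
The rename leaves the VLAN offload mask semantics untouched: through the
public API a set mask bit requests that offload, as
vmxnet3_setup_driver_shared() does above for strip plus filter. A sketch
(hypothetical helper):

    #include <rte_ethdev.h>

    static int
    enable_vlan_strip_and_filter(uint16_t port_id)
    {
        /* Set bits request the offload; clear bits disable it. */
        return rte_eth_dev_set_vlan_offload(port_id,
                RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
    }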
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 59bee9723cfc..7588ba929b65 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -32,18 +32,18 @@
 				VMXNET3_MAX_RX_QUEUES + 1)
 
 #define VMXNET3_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 #define VMXNET3_V4_RSS_MASK ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define VMXNET3_MANDATORY_V4_RSS ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 /* RSS configuration structure - shared with device through GPA */
 typedef struct VMXNET3_RSSConf {
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 5cf53d4de825..0f2671f528f4 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1326,13 +1326,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	rss_hf = port_rss_conf->rss_hf &
 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1389,13 +1389,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
 	/* loading hashType */
 	dev_rss_conf->hashType = 0;
 	rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
 	return VMXNET3_SUCCESS;
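
Going the other direction, an application requests hash types with the
same renamed bits; bits outside the port's advertised
flow_type_rss_offloads are rejected by the API. A minimal sketch
(hypothetical helper):

    #include <rte_ethdev.h>

    static int
    set_tcp_rss(uint16_t port_id)
    {
        struct rte_eth_rss_conf conf = {
            .rss_key = NULL,    /* keep the current key */
            .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                      RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &conf);
    }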
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 5251db0b1674..ecc6ef2965ee 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -71,12 +71,12 @@ mbuf_input(struct rte_mbuf *mbuf)
 
 static const struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -334,7 +334,7 @@ check_port_link_status(uint16_t port_id)
 
 		if (link_get_err >= 0 && link.link_status) {
 			const char *dp = (link.link_duplex ==
-				ETH_LINK_FULL_DUPLEX) ?
+				RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex";
 			printf("\nPort %u Link Up - speed %s - %s\n",
 				port_id,
diff --git a/examples/bond/main.c b/examples/bond/main.c
index f48400e21156..e4c627e203a4 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -116,18 +116,18 @@ static struct rte_mempool *mbuf_pool;
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -151,9 +151,9 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
@@ -243,9 +243,9 @@ bond_port_init(struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			BOND_PORT, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
 	if (retval != 0)
 		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
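
The bond hunks repeat a capability-gated pattern that recurs across these
examples: enable an optional Tx offload only when the port advertises it.
Condensed into one sketch (hypothetical helper):

    #include <rte_ethdev.h>

    static void
    maybe_enable_fast_free(uint16_t port_id, struct rte_eth_conf *conf)
    {
        struct rte_eth_dev_info dev_info;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
            return;

        if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
            conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
    }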
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 1b1029660e77..e6af8420e4c6 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -80,16 +80,16 @@ struct app_stats prev_app_stats;
 
 static const struct rte_eth_conf port_conf_default = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		}
 	},
 };
@@ -127,9 +127,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 21ed85c7d6c9..5053d174335c 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -98,7 +98,7 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
 	int ret;
 
 	memset(&cfg_port, 0, sizeof(cfg_port));
-	cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
+	cfg_port.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
 
 	for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
 		struct app_port *ptr_port = &app_cfg->ports[idx_port];
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 413251630709..e7cdf8d5775b 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -233,13 +233,13 @@ rte_ethtool_get_pauseparam(uint16_t port_id,
 	pause_param->tx_pause = 0;
 	pause_param->rx_pause = 0;
 	switch (fc_conf.mode) {
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		pause_param->rx_pause = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		pause_param->tx_pause = 1;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_param->rx_pause = 1;
 		pause_param->tx_pause = 1;
 	default:
@@ -277,14 +277,14 @@ rte_ethtool_set_pauseparam(uint16_t port_id,
 
 	if (pause_param->tx_pause) {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_FULL;
+			fc_conf.mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf.mode = RTE_FC_TX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
 	} else {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_RX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_RX_PAUSE;
 		else
-			fc_conf.mode = RTE_FC_NONE;
+			fc_conf.mode = RTE_ETH_FC_NONE;
 	}
 
 	status = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
@@ -398,12 +398,12 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
 	for (vf = 0; vf < num_vfs; vf++) {
 #ifdef RTE_NET_IXGBE
 		rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
-			ETH_VMDQ_ACCEPT_UNTAG, 0);
+			RTE_ETH_VMDQ_ACCEPT_UNTAG, 0);
 #endif
 	}
 
 	/* Enable Rx VLAN filter; unsupported VF status is discarded */
-	ret = rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
+	ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
 	if (ret != 0)
 		return ret;
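
The get/set pauseparam conversions above are the four-way mapping between
the rx/tx pause booleans and the renamed flow control modes; the set
direction condenses to one helper (sketch, hypothetical name):

    #include <rte_ethdev.h>

    static enum rte_eth_fc_mode
    pause_to_fc_mode(int rx_pause, int tx_pause)
    {
        if (rx_pause && tx_pause)
            return RTE_ETH_FC_FULL;
        if (tx_pause)
            return RTE_ETH_FC_TX_PAUSE;
        if (rx_pause)
            return RTE_ETH_FC_RX_PAUSE;
        return RTE_ETH_FC_NONE;
    }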
 
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index f70ab0cc9e38..3ac98add5692 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -283,14 +283,14 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -312,12 +312,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index ca6cd200caad..5780928d75ee 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -614,14 +614,14 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -643,9 +643,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
 
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index db71f5aa0401..f44ee65372ff 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -218,9 +218,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index 29fb4b3d55ef..150406e385d4 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -113,7 +113,7 @@ assert_link_status(void)
 	memset(&link, 0, sizeof(link));
 	do {
 		link_get_err = rte_eth_link_get(port_id, &link);
-		if (link_get_err == 0 && link.link_status == ETH_LINK_UP)
+		if (link_get_err == 0 && link.link_status == RTE_ETH_LINK_UP)
 			break;
 		rte_delay_ms(CHECK_INTERVAL);
 	} while (--rep_cnt);
@@ -121,7 +121,7 @@ assert_link_status(void)
 	if (link_get_err < 0)
 		rte_exit(EXIT_FAILURE, ":: error: link get is failing: %s\n",
 			 rte_strerror(-link_get_err));
-	if (link.link_status == ETH_LINK_DOWN)
+	if (link.link_status == RTE_ETH_LINK_DOWN)
 		rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
 }
 
@@ -138,12 +138,12 @@ init_port(void)
 		},
 		.txmode = {
 			.offloads =
-				DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO,
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO,
 		},
 	};
 	struct rte_eth_txconf txq_conf;
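
flow_filtering requests a fixed Tx offload set; a port lacking any of the
bits fails at configure time. A sketch of validating the request up front
instead (hypothetical helper):

    #include <errno.h>
    #include <rte_ethdev.h>

    static int
    check_tx_offloads(uint16_t port_id, uint64_t requested)
    {
        struct rte_eth_dev_info dev_info;
        int ret = rte_eth_dev_info_get(port_id, &dev_info);

        if (ret != 0)
            return ret;
        /* Any requested bit the port does not advertise is unsupported. */
        return (requested & ~dev_info.tx_offload_capa) ? -ENOTSUP : 0;
    }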
diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index 0c413180f889..94e3ac91b299 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -819,13 +819,13 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	/* Configuring port to use RSS for multiple RX queues. 8< */
 	static const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_PROTO_MASK,
+				.rss_hf = RTE_ETH_RSS_PROTO_MASK,
 			}
 		}
 	};
@@ -853,9 +853,9 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device:"
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index f24536972084..aa41fcc1d037 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -148,14 +148,14 @@ static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_JUMBO_FRAME),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME),
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -624,7 +624,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
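
The jumbo-frame handling here pairs RTE_ETH_RX_OFFLOAD_JUMBO_FRAME with a
matching max_rx_pkt_len; the same arithmetic in isolation, using the
generic rte_ether length macros (mtu and conf are assumed locals):

    uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

    if (frame_len > RTE_ETHER_MAX_LEN)
        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
    else
        conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
    conf.rxmode.max_rx_pkt_len = frame_len;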
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
index 16bcffe356bc..8e974a8d0a92 100644
--- a/examples/ip_pipeline/link.c
+++ b/examples/ip_pipeline/link.c
@@ -45,7 +45,7 @@ link_next(struct link *link)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -57,12 +57,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -139,7 +139,7 @@ link_create(const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -157,9 +157,9 @@ link_create(const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -267,5 +267,5 @@ link_is_up(const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
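
RETA_CONF_SIZE above sizes the redirection-table array from the renamed
macro; a sketch of the corresponding table fill, assuming reta_size has
already been validated against RTE_ETH_RSS_RETA_SIZE_512 and nb_queues is
the active Rx queue count:

    struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
    uint32_t i;

    memset(reta_conf, 0, sizeof(reta_conf));
    for (i = 0; i < reta_size; i++) {
        reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
        reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
            i % nb_queues;
    }
    if (rte_eth_rss_reta_update(port_id, reta_conf, reta_size) < 0)
        return -1;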
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 8645ac790be4..8aabea002bbb 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -161,22 +161,22 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_JUMBO_FRAME),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME),
 	},
 	.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -740,7 +740,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1097,9 +1097,9 @@ main(int argc, char **argv)
 		n_tx_queue = nb_lcores;
 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
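
Masking the requested hash types against dev_info.flow_type_rss_offloads,
as done here, is the pattern most of these examples share; in isolation
(portid, dev_info and local_port_conf are assumed, headers omitted):

    uint64_t req_rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP;

    local_port_conf.rx_adv_conf.rss_conf.rss_hf =
        req_rss_hf & dev_info.flow_type_rss_offloads;
    if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != req_rss_hf)
        printf("Port %u: RSS hf adjusted, requested 0x%" PRIx64
               " configured 0x%" PRIx64 "\n", portid, req_rss_hf,
               local_port_conf.rx_adv_conf.rss_conf.rss_hf);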
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index f252d34985b4..73932564e459 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -234,20 +234,20 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1456,10 +1456,10 @@ print_usage(const char *prgname)
 		"               \"parallel\" : Parallel\n"
 		"  --" CMD_LINE_OPT_RX_OFFLOAD
 		": bitmask of the RX HW offload capabilities to enable/use\n"
-		"                         (DEV_RX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_TX_OFFLOAD
 		": bitmask of the TX HW offload capabilities to enable/use\n"
-		"                         (DEV_TX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
 		": max number of entries in reassemble(fragment) table\n"
 		"    (zero (default value) disables reassembly)\n"
@@ -1908,7 +1908,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2211,12 +2211,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 
 	frame_size = MTU_TO_FRAMELEN(mtu_size);
 	if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	local_port_conf.rxmode.max_rx_pkt_len = frame_size;
 
 	if (multi_seg_required()) {
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	local_port_conf.rxmode.offloads |= req_rx_offloads;
@@ -2239,12 +2239,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 			portid, local_port_conf.txmode.offloads,
 			dev_info.tx_offload_capa);
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	printf("port %u configurng rx_offloads=0x%" PRIx64
 		", tx_offloads=0x%" PRIx64 "\n",
@@ -2302,7 +2302,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 		/* Pre-populate pkt offloads based on capabilities */
 		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
 		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
-		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
 
 		tx_queueid++;
@@ -2663,7 +2663,7 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
 	struct rte_flow *flow;
 	int ret;
 
-	if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 17a28556c971..5cdd794f017f 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -986,7 +986,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	if (inbound) {
 		if ((dev_info.rx_offload_capa &
-				DEV_RX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware RX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -994,7 +994,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	} else { /* outbound */
 		if ((dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware TX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -1628,7 +1628,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 	}
 
 	/* Check for outbound rules that use offloads and use this port */
@@ -1639,7 +1639,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
 	}
 	return 0;
 }
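
Applications that need to build against both pre- and post-rename headers
can paper over the difference locally; a sketch, assuming older releases
define only the DEV_* names:

#ifndef RTE_ETH_RX_OFFLOAD_SECURITY
#define RTE_ETH_RX_OFFLOAD_SECURITY DEV_RX_OFFLOAD_SECURITY
#define RTE_ETH_TX_OFFLOAD_SECURITY DEV_TX_OFFLOAD_SECURITY
#endif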
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index cc527d7f6b38..96fb325ff180 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -112,11 +112,11 @@ static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
+		.offloads = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	},
 };
 
@@ -620,7 +620,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/kni/main.c b/examples/kni/main.c
index beabb3c848aa..81124dc0dc88 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -95,7 +95,7 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
 /* Options for configuring ethernet port */
 static struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -608,9 +608,9 @@ init_port(uint16_t port)
 			"Error during getting device (port %u) info: %s\n",
 			port, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
@@ -688,7 +688,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -792,9 +792,9 @@ kni_change_mtu_(uint16_t port_id, unsigned int new_mtu)
 	memcpy(&conf, &port_conf, sizeof(conf));
 	/* Set new MTU */
 	if (new_mtu > RTE_ETHER_MAX_LEN)
-		conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* mtu + length of header + length of FCS = max pkt length */
 	conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 5f539c458cdd..89489843e2bd 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -216,12 +216,12 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1809,7 +1809,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2633,9 +2633,9 @@ initialize_ports(struct l2fwd_crypto_options *options)
 			return retval;
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (retval < 0) {
 			printf("Cannot configure device: err=%d, port=%u\n",
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index b8c1e02d7598..80a72f7095cf 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -15,7 +15,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 			.split_hdr_size = 0,
 		},
 		.txmode = {
-			.mq_mode = ETH_MQ_TX_NONE,
+			.mq_mode = RTE_ETH_MQ_TX_NONE,
 		},
 	};
 	uint16_t nb_ports_available = 0;
@@ -23,9 +23,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 	int ret;
 
 	if (rsrc->event_mode) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-		port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+		port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	}
 
 	/* Initialise each port */
@@ -61,9 +61,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queue. 8< */
 		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 1db89f2bd139..9806204b81d1 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -395,7 +395,7 @@ check_all_ports_link_status(struct l2fwd_resources *rsrc,
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index bbb4a27a6d54..2e50339afb61 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -94,7 +94,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -726,7 +726,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -869,9 +869,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 4e1a17cfe4f5..d228a842788d 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -478,7 +478,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -650,9 +650,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE,
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 911e40c66e0e..b4a69dde63dc 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -95,7 +95,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -606,7 +606,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -792,9 +792,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the number of queues for a port. */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index a1f457b564b6..9323426e9b1d 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -124,20 +124,20 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1815,9 +1815,9 @@ parse_args(int argc, char **argv)
 
 			printf("jumbo frame is enabled\n");
 			port_conf.rxmode.offloads |=
-					DEV_RX_OFFLOAD_JUMBO_FRAME;
+					RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			port_conf.txmode.offloads |=
-					DEV_TX_OFFLOAD_MULTI_SEGS;
+					RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/*
 			 * if no max-pkt-len set, then use the
@@ -1970,7 +1970,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2080,9 +2080,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index a0de8ca9b42d..278fe95970f3 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,18 +111,18 @@ static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -494,8 +494,8 @@ parse_args(int argc, char **argv)
 			const struct option lenopts = {"max-pkt-len",
 						       required_argument, 0, 0};
 
-			port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-			port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+			port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+			port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/*
 			 * if no max-pkt-len set, use the default
@@ -628,7 +628,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* Clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -807,9 +807,9 @@ main(int argc, char **argv)
 		       nb_rx_queue, n_tx_queue);
 
 		rte_eth_dev_info_get(portid, &dev_info);
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index aa7b8db44ae8..85609e9d4593 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -250,19 +250,19 @@ uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_UDP,
+			.rss_hf = RTE_ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	}
 };
 
@@ -1961,9 +1961,9 @@ parse_args(int argc, char **argv)
 
 				printf("jumbo frame is enabled \n");
 				port_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 				port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MULTI_SEGS;
+						RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 				/**
 				 * if no max-pkt-len set, use the default value
@@ -2222,7 +2222,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2622,9 +2622,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 961860ea18ef..7c7613a83aad 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -75,9 +75,9 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
 			rte_panic("Error during getting device (port %u) info:"
 				  "%s\n", port_id, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+						RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 						dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 00ac267af1dd..500444565463 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -120,19 +120,19 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -703,8 +703,8 @@ parse_args(int argc, char **argv)
 				"max-pkt-len", required_argument, 0, 0
 			};
 
-			port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-			port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+			port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+			port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/*
 			 * if no max-pkt-len set, use the default
@@ -926,7 +926,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1035,15 +1035,15 @@ l3fwd_poll_resource_setup(void)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
 
 		if (dev_info.max_rx_queues == 1)
-			local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 
 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 7470aa539a90..7c1214512983 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.intr_conf = {
 		.lsc = 1, /**< lsc interrupt feature enabled */
@@ -147,7 +147,7 @@ print_stats(void)
 			   link_get_err < 0 ? "0" :
 			   rte_eth_link_speed_to_str(link.link_speed),
 			   link_get_err < 0 ? "Link get failed" :
-			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+			   (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? \
 					"full-duplex" : "half-duplex"),
 			   port_statistics[portid].tx,
 			   port_statistics[portid].rx,
@@ -507,7 +507,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -634,9 +634,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 1ad71ca7ec5f..23307073c904 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -94,7 +94,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS
+			.mq_mode = RTE_ETH_MQ_RX_RSS
 		}
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_clients;
@@ -213,7 +213,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 01dc3acf34d5..85955375f1bf 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -176,18 +176,18 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 {
 	struct rte_eth_conf port_conf = {
 			.rxmode = {
-				.mq_mode	= ETH_MQ_RX_RSS,
+				.mq_mode	= RTE_ETH_MQ_RX_RSS,
 				.split_hdr_size = 0,
-				.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+				.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 			},
 			.rx_adv_conf = {
 				.rss_conf = {
 					.rss_key = NULL,
-					.rss_hf = ETH_RSS_IP,
+					.rss_hf = RTE_ETH_RSS_IP,
 				},
 			},
 			.txmode = {
-				.mq_mode = ETH_MQ_TX_NONE,
+				.mq_mode = RTE_ETH_MQ_TX_NONE,
 			}
 	};
 	const uint16_t rx_rings = num_queues, tx_rings = num_queues;
@@ -218,9 +218,9 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 
 	info.default_rxconf.rx_drop_en = 1;
 
-	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
@@ -392,7 +392,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
index e9a388710647..f110fc129f55 100644
--- a/examples/ntb/ntb_fwd.c
+++ b/examples/ntb/ntb_fwd.c
@@ -89,17 +89,17 @@ static uint16_t pkt_burst = NTB_DFLT_PKT_BURST;
 
 static struct rte_eth_conf eth_port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index d2fe9f6b50d8..eb15899c902f 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -294,9 +294,9 @@ configure_eth_port(uint16_t port_id)
 		return ret;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
 	if (ret != 0)
 		return ret;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 2f593abf263d..86671655b432 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -307,19 +307,19 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_TCP,
+			.rss_hf = RTE_ETH_RSS_TCP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -2988,9 +2988,9 @@ parse_args(int argc, char **argv)
 
 			printf("jumbo frame is enabled - disabling simple TX path\n");
 			port_conf.rxmode.offloads |=
-					DEV_RX_OFFLOAD_JUMBO_FRAME;
+					RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			port_conf.txmode.offloads |=
-					DEV_TX_OFFLOAD_MULTI_SEGS;
+					RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/* if no max-pkt-len set, use the default value
 			 * RTE_ETHER_MAX_LEN
@@ -3466,7 +3466,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3577,9 +3577,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 467cda5a6dac..2e68a3870a09 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -133,7 +133,7 @@ mempool_find(struct obj *obj, const char *name)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -145,12 +145,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -227,7 +227,7 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -245,9 +245,9 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -356,7 +356,7 @@ link_is_up(struct obj *obj, const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
 
 struct link *
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 4f32ade7fbf7..db32b0d6c427 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -197,14 +197,14 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Force full Tx path in the driver, required for IEEE1588 */
-	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
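
Enabling Rx timestamping follows the same check-then-set shape; a sketch
mirroring the example above (hwts_dynfield_offset is a hypothetical global
holding the registered dynamic field offset):

    if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
        rte_exit(EXIT_FAILURE, "Port %u: no hardware timestamping\n", port);
    port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

    rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
    if (hwts_dynfield_offset < 0)
        rte_exit(EXIT_FAILURE, "Failed to register timestamp field\n");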
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index 7ffccc8369dc..5ef14c176b11 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -51,19 +51,19 @@ static struct rte_mempool *pool = NULL;
  ***/
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -333,8 +333,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_rx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
@@ -379,8 +379,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_tx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 1abe003fc6ae..e750928fb89d 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -61,7 +61,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -106,9 +106,9 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE,
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 6f20f98b2b30..08df716dc0fb 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -145,17 +145,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (hw_timestamping) {
-		if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
+		if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 			printf("\nERROR: Port %u does not support hardware timestamping\n"
 					, port);
 			return -1;
 		}
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
 		if (hwts_dynfield_offset < 0) {
 			printf("ERROR: Failed to register timestamp field\n");
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index 9ebd88bac20e..074fee5b26b2 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -96,7 +96,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_nodes;
@@ -115,9 +115,9 @@ init_port(uint16_t port_num)
 	if (retval != 0)
 		return retval;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/*
 	 * Standard DPDK port initialisation - config port, then set up
@@ -277,7 +277,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index ae08261befd7..737df4ca2a17 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -55,9 +55,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index bc3d71c8984e..b1d363ae21db 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -109,23 +109,23 @@ static int nb_sockets;
 /* empty vmdq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 		/*
 		 * VLAN strip is necessary for 1G NICs such as I350;
 		 * it fixes a bug where IPv4 forwarding in the guest can't
 		 * forward packets from one virtio dev to another virtio dev.
 		 */
-		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
+		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM |
-			     DEV_TX_OFFLOAD_VLAN_INSERT |
-			     DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_TCP_TSO),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO),
 	},
 	.rx_adv_conf = {
 		/*
@@ -133,7 +133,7 @@ static struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -290,9 +290,9 @@ port_init(uint16_t port)
 		return -1;
 
 	rx_rings = (uint16_t)dev_info.max_rx_queues;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0) {
@@ -562,8 +562,8 @@ us_vhost_parse_args(int argc, char **argv)
 		case 'P':
 			promiscuous = 1;
 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
-				ETH_VMDQ_ACCEPT_BROADCAST |
-				ETH_VMDQ_ACCEPT_MULTICAST;
+				RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+				RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 			break;
 
 		case OPT_VM2VM_NUM:
@@ -638,7 +638,7 @@ us_vhost_parse_args(int argc, char **argv)
 			mergeable = !!ret;
 			if (ret) {
 				vmdq_conf_default.rxmode.offloads |=
-					DEV_RX_OFFLOAD_JUMBO_FRAME;
+					RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 				vmdq_conf_default.rxmode.max_rx_pkt_len
 					= JUMBO_FRAME_MAX_SIZE;
 			}
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index 7d5bf6855426..dddcde40efe2 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -78,9 +78,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -278,7 +278,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 		       /* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index d3bc19f78ee5..16782a5d850f 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -66,12 +66,12 @@ static uint8_t rss_enable;
 /* empty vmdq configuration structure. Filled in programmatically */
 static const struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		/*
@@ -79,7 +79,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -157,11 +157,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -259,9 +259,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
 	if (retval != 0)
 		return retval;
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 685a03bdd194..f58625a76227 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -60,8 +60,8 @@ static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports;
 
 /* number of pools (if user does not specify any, 32 by default) */
-static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
-static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
+static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
+static enum rte_eth_nb_tcs   num_tcs   = RTE_ETH_4_TCS;
 static uint16_t num_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
 static uint8_t rss_enable;
@@ -69,11 +69,11 @@ static uint8_t rss_enable;
 /* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
 static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_DCB,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
+		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
 	},
 	/*
 	 * should be overridden separately in code with
@@ -81,7 +81,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	 */
 	.rx_adv_conf = {
 		.vmdq_dcb_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -89,12 +89,12 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 			.dcb_tc = {0},
 		},
 		.dcb_rx_conf = {
-				.nb_tcs = ETH_4_TCS,
+				.nb_tcs = RTE_ETH_4_TCS,
 				/** Traffic class each UP mapped to. */
 				.dcb_tc = {0},
 		},
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -103,7 +103,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	},
 	.tx_adv_conf = {
 		.vmdq_dcb_tx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.dcb_tc = {0},
 		},
 	},
@@ -157,7 +157,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 		conf.pool_map[i].pools = 1UL << i;
 		vmdq_conf.pool_map[i].pools = 1UL << i;
 	}
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++){
 		conf.dcb_tc[i] = i % num_tcs;
 		dcb_conf.dcb_tc[i] = i % num_tcs;
 		tx_conf.dcb_tc[i] = i % num_tcs;
@@ -173,11 +173,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
 			  sizeof(tx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -271,9 +271,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
@@ -390,9 +390,9 @@ vmdq_parse_num_pools(const char *q_arg)
 	if (n != 16 && n != 32)
 		return -1;
 	if (n == 16)
-		num_pools = ETH_16_POOLS;
+		num_pools = RTE_ETH_16_POOLS;
 	else
-		num_pools = ETH_32_POOLS;
+		num_pools = RTE_ETH_32_POOLS;
 
 	return 0;
 }
@@ -412,9 +412,9 @@ vmdq_parse_num_tcs(const char *q_arg)
 	if (n != 4 && n != 8)
 		return -1;
 	if (n == 4)
-		num_tcs = ETH_4_TCS;
+		num_tcs = RTE_ETH_4_TCS;
 	else
-		num_tcs = ETH_8_TCS;
+		num_tcs = RTE_ETH_8_TCS;
 
 	return 0;
 }
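
With the namespaced enums, the VMDq+DCB skeleton reads as below; a sketch
of the relevant conf fields only, not a complete port setup:

    struct rte_eth_conf conf = {0};

    conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB;
    conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
    conf.rx_adv_conf.vmdq_dcb_conf.nb_queue_pools = RTE_ETH_32_POOLS;
    conf.rx_adv_conf.dcb_rx_conf.nb_tcs = RTE_ETH_4_TCS;
    conf.tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools = RTE_ETH_32_POOLS;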
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9d95cd11e1b5..2be877d048cf 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -98,9 +98,6 @@ static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
 
 static const struct {
@@ -126,14 +123,14 @@ static const struct {
 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
-	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
 };
 
 #undef RTE_RX_OFFLOAD_BIT2STR
 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
 
 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_TX_OFFLOAD_##_name, #_name }
+	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
 
 static const struct {
 	uint64_t offload;
@@ -1184,32 +1181,32 @@ uint32_t
 rte_eth_speed_bitflag(uint32_t speed, int duplex)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
-		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
-	case ETH_SPEED_NUM_100M:
-		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
-	case ETH_SPEED_NUM_1G:
-		return ETH_LINK_SPEED_1G;
-	case ETH_SPEED_NUM_2_5G:
-		return ETH_LINK_SPEED_2_5G;
-	case ETH_SPEED_NUM_5G:
-		return ETH_LINK_SPEED_5G;
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10M:
+		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+	case RTE_ETH_SPEED_NUM_100M:
+		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+	case RTE_ETH_SPEED_NUM_1G:
+		return RTE_ETH_LINK_SPEED_1G;
+	case RTE_ETH_SPEED_NUM_2_5G:
+		return RTE_ETH_LINK_SPEED_2_5G;
+	case RTE_ETH_SPEED_NUM_5G:
+		return RTE_ETH_LINK_SPEED_5G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
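
Caller-side, the renamed helper maps a numeric speed plus a duplex flag to
the corresponding capability bit:

    uint32_t capa;

    /* Duplex only matters for 10M/100M; higher speeds are full duplex. */
    capa = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
                                 RTE_ETH_LINK_FULL_DUPLEX);
    /* capa == RTE_ETH_LINK_SPEED_10G */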
@@ -1458,7 +1455,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If jumbo frames are enabled, check that the maximum RX packet
 	 * length is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
 			RTE_ETHDEV_LOG(ERR,
 				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
@@ -1491,7 +1488,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (dev_conf->rxmode.max_lro_pkt_size == 0)
 			dev->data->dev_conf.rxmode.max_lro_pkt_size =
 				dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1543,12 +1540,12 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
-	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
-	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
 			port_id,
-			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -2157,7 +2154,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
 			dev->data->dev_conf.rxmode.max_lro_pkt_size =
 				dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -2752,21 +2749,21 @@ const char *
 rte_eth_link_speed_to_str(uint32_t link_speed)
 {
 	switch (link_speed) {
-	case ETH_SPEED_NUM_NONE: return "None";
-	case ETH_SPEED_NUM_10M:  return "10 Mbps";
-	case ETH_SPEED_NUM_100M: return "100 Mbps";
-	case ETH_SPEED_NUM_1G:   return "1 Gbps";
-	case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
-	case ETH_SPEED_NUM_5G:   return "5 Gbps";
-	case ETH_SPEED_NUM_10G:  return "10 Gbps";
-	case ETH_SPEED_NUM_20G:  return "20 Gbps";
-	case ETH_SPEED_NUM_25G:  return "25 Gbps";
-	case ETH_SPEED_NUM_40G:  return "40 Gbps";
-	case ETH_SPEED_NUM_50G:  return "50 Gbps";
-	case ETH_SPEED_NUM_56G:  return "56 Gbps";
-	case ETH_SPEED_NUM_100G: return "100 Gbps";
-	case ETH_SPEED_NUM_200G: return "200 Gbps";
-	case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+	case RTE_ETH_SPEED_NUM_NONE: return "None";
+	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
+	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
+	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
+	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
+	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
+	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
+	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
+	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
+	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
+	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
 	default: return "Invalid";
 	}
 }
@@ -2790,14 +2787,14 @@ rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
 		return -EINVAL;
 	}
 
-	if (eth_link->link_status == ETH_LINK_DOWN)
+	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
 		return snprintf(str, len, "Link down");
 	else
 		return snprintf(str, len, "Link up at %s %s %s",
 			rte_eth_link_speed_to_str(eth_link->link_speed),
-			(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			"FDX" : "HDX",
-			(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 			"Autoneg" : "Fixed");
 }
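
The string helpers keep their signatures; only the constants they compare against are renamed. A minimal sketch, assuming port_id names a valid, started port:

	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
		rte_eth_link_to_str(text, sizeof(text), &link);
		printf("%s\n", text); /* e.g. "Link up at 25 Gbps FDX Autoneg" */
	}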
 
@@ -3663,7 +3660,7 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
 	dev = &rte_eth_devices[port_id];
 
 	if (!(dev->data->dev_conf.rxmode.offloads &
-	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
 			port_id);
 		return -ENOSYS;
@@ -3750,44 +3747,44 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	dev_offloads = orig_offloads;
 
 	/* check which option changed by application */
-	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
-		mask |= ETH_VLAN_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		mask |= RTE_ETH_VLAN_STRIP_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
-		mask |= ETH_VLAN_FILTER_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+		mask |= RTE_ETH_VLAN_FILTER_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
-		mask |= ETH_VLAN_EXTEND_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+		mask |= RTE_ETH_VLAN_EXTEND_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
-		mask |= ETH_QINQ_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+		mask |= RTE_ETH_QINQ_STRIP_MASK;
 	}
 
 	/*no change*/
@@ -3832,17 +3829,17 @@ rte_eth_dev_get_vlan_offload(uint16_t port_id)
 	dev = &rte_eth_devices[port_id];
 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		ret |= ETH_VLAN_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		ret |= ETH_VLAN_FILTER_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-		ret |= ETH_VLAN_EXTEND_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
-		ret |= ETH_QINQ_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
 
 	return ret;
 }
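
The VLAN offload get/set pair keeps working with the renamed mask bits. A minimal sketch, assuming port_id is valid and the PMD supports VLAN stripping:

	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask >= 0) {
		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		(void)rte_eth_dev_set_vlan_offload(port_id, mask);
	}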
@@ -3919,7 +3916,7 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
 		return -EINVAL;
 	}
@@ -4116,7 +4113,7 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4142,7 +4139,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4283,8 +4280,8 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 			port_id);
 		return -EINVAL;
 	}
-	if (pool >= ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
+	if (pool >= RTE_ETH_64_POOLS) {
+		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
@@ -4548,21 +4545,21 @@ rte_eth_mirror_rule_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
+	if (mirror_conf->dst_pool >= RTE_ETH_64_POOLS) {
 		RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
-			ETH_64_POOLS - 1);
+			RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
-	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
-	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
+	if ((mirror_conf->rule_type & (RTE_ETH_MIRROR_VIRTUAL_POOL_UP |
+	     RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
 	    (mirror_conf->pool_mask == 0)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Invalid mirror pool, pool mask can not be 0\n");
 		return -EINVAL;
 	}
 
-	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
+	if ((mirror_conf->rule_type & RTE_ETH_MIRROR_VLAN) &&
 	    mirror_conf->vlan.vlan_mask == 0) {
 		RTE_ETHDEV_LOG(ERR,
 			"Invalid vlan mask, vlan mask can not be 0\n");
@@ -6238,7 +6235,7 @@ eth_dev_handle_port_link_status(const char *cmd __rte_unused,
 	rte_tel_data_add_dict_string(d, status_str, "UP");
 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
 	rte_tel_data_add_dict_string(d, "duplex",
-			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex");
 	return 0;
 }
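
Since every legacy name now expands to its RTE_ETH_ counterpart, the two spellings are identical tokens until the aliases are removed, which is what makes the change source compatible. A compile-time sketch (RTE_BUILD_BUG_ON comes from rte_common.h and must sit in function scope):

	RTE_BUILD_BUG_ON(ETH_LINK_FULL_DUPLEX != RTE_ETH_LINK_FULL_DUPLEX);
	RTE_BUILD_BUG_ON(DEV_RX_OFFLOAD_VLAN_STRIP !=
			 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);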
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index d2b27c351fdb..3e4109491316 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -249,7 +249,7 @@ void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
  * field is not supported, its value is 0.
  * All byte-related statistics do not include Ethernet FCS regardless
  * of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
  */
 struct rte_eth_stats {
 	uint64_t ipackets;  /**< Total number of successfully received packets. */
@@ -279,42 +279,74 @@ struct rte_eth_stats {
 /**
  * Device supported speeds bitmap flags
  */
-#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
-#define ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
-#define ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
-#define ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
-#define ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
-#define ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
-#define ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
-#define ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
-#define ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG	RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED	RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD	RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M	RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD	RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M	RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
+#define ETH_LINK_SPEED_1G	RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G	RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
+#define ETH_LINK_SPEED_5G	RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
+#define ETH_LINK_SPEED_10G	RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
+#define ETH_LINK_SPEED_20G	RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
+#define ETH_LINK_SPEED_25G	RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
+#define ETH_LINK_SPEED_40G	RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
+#define ETH_LINK_SPEED_50G	RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
+#define ETH_LINK_SPEED_56G	RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G	RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G	RTE_ETH_LINK_SPEED_200G
 
 /**
  * Ethernet numeric link speeds in Mbps
  */
-#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
-#define ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
-#define ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
-#define ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
-#define ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
-#define ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
-#define ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
-#define ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
-#define ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
-#define ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
-#define ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE	RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
+#define ETH_SPEED_NUM_10M	RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M	RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
+#define ETH_SPEED_NUM_1G	RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G	RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
+#define ETH_SPEED_NUM_5G	RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
+#define ETH_SPEED_NUM_10G	RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
+#define ETH_SPEED_NUM_20G	RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
+#define ETH_SPEED_NUM_25G	RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
+#define ETH_SPEED_NUM_40G	RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
+#define ETH_SPEED_NUM_50G	RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
+#define ETH_SPEED_NUM_56G	RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G	RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G	RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN	RTE_ETH_SPEED_NUM_UNKNOWN
 
 /**
  * A structure used to retrieve link-level information of an Ethernet port.
@@ -328,12 +360,18 @@ struct rte_eth_link {
 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
 
 /* Utility constants */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX	RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX	RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN		RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP		RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED		RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG	RTE_ETH_LINK_AUTONEG
 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
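
Drivers fill struct rte_eth_link with the same utility constants under the new prefix; a minimal sketch:

	struct rte_eth_link link = {
		.link_speed   = RTE_ETH_SPEED_NUM_10G,
		.link_duplex  = RTE_ETH_LINK_FULL_DUPLEX,
		.link_autoneg = RTE_ETH_LINK_AUTONEG,
		.link_status  = RTE_ETH_LINK_UP,
	};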
 
 /**
@@ -349,9 +387,12 @@ struct rte_eth_thresh {
 /**
  *  Simple flags are used for rte_eth_conf.rxmode.mq_mode.
  */
-#define ETH_MQ_RX_RSS_FLAG  0x1
-#define ETH_MQ_RX_DCB_FLAG  0x2
-#define ETH_MQ_RX_VMDQ_FLAG 0x4
+#define RTE_ETH_MQ_RX_RSS_FLAG  0x1
+#define ETH_MQ_RX_RSS_FLAG	RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG  0x2
+#define ETH_MQ_RX_DCB_FLAG	RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4
+#define ETH_MQ_RX_VMDQ_FLAG	RTE_ETH_MQ_RX_VMDQ_FLAG
 
 /**
  *  A set of values to identify what method is to be used to route
@@ -359,50 +400,49 @@ struct rte_eth_thresh {
  */
 enum rte_eth_rx_mq_mode {
 	/** None of DCB,RSS or VMDQ mode */
-	ETH_MQ_RX_NONE = 0,
+	RTE_ETH_MQ_RX_NONE = 0,
 
 	/** For RX side, only RSS is on */
-	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
 	/** For RX side,only DCB is on. */
-	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Both DCB and RSS enable */
-	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 
 	/** Only VMDQ, no RSS nor DCB */
-	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** RSS mode with VMDQ */
-	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** Use VMDQ+DCB to route traffic to queues */
-	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Enable both VMDQ and DCB in VMDq */
-	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
-				 ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+				 RTE_ETH_MQ_RX_VMDQ_FLAG,
 };
 
-/**
- * for rx mq mode backward compatible
- */
-#define ETH_RSS                       ETH_MQ_RX_RSS
-#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX                    ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE		RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS		RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB		RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS	RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY	RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS	RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB	RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS	RTE_ETH_MQ_RX_VMDQ_DCB_RSS
 
 /**
  * A set of values to identify what method is to be used to transmit
  * packets using multi-TCs.
  */
 enum rte_eth_tx_mq_mode {
-	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
-	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
-	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
+	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
+	RTE_ETH_MQ_TX_DCB,          /**< For TX side, only DCB is on. */
+	RTE_ETH_MQ_TX_VMDQ_DCB,	/**< For TX side, both DCB and VT are on. */
+	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
-
-/**
- * for tx mq mode backward compatible
- */
-#define ETH_DCB_NONE                ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX                  ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE		RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB		RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB	RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY	RTE_ETH_MQ_TX_VMDQ_ONLY
 
 /**
  * A structure used to configure the RX features of an Ethernet port.
@@ -415,7 +455,7 @@ struct rte_eth_rxmode {
 	uint32_t max_lro_pkt_size;
 	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
 	/**
-	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -430,12 +470,17 @@ struct rte_eth_rxmode {
  * Note that single VLAN is treated the same as inner VLAN.
  */
 enum rte_vlan_type {
-	ETH_VLAN_TYPE_UNKNOWN = 0,
-	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
-	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
-	ETH_VLAN_TYPE_MAX,
+	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+	RTE_ETH_VLAN_TYPE_MAX,
 };
 
+#define ETH_VLAN_TYPE_UNKNOWN	RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER	RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER	RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX	RTE_ETH_VLAN_TYPE_MAX
+
 /**
  * A structure used to describe a vlan filter.
  * If the bit corresponding to a VID is set, such VID is on.
@@ -506,37 +551,68 @@ struct rte_eth_rss_conf {
  * Below macros are defined for RSS offload types, they can be used to
  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
  */
-#define ETH_RSS_IPV4               (1ULL << 2)
-#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
-#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
-#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
-#define ETH_RSS_IPV6               (1ULL << 8)
-#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
-#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
-#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
-#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
-#define ETH_RSS_IPV6_EX            (1ULL << 15)
-#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
-#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
-#define ETH_RSS_PORT               (1ULL << 18)
-#define ETH_RSS_VXLAN              (1ULL << 19)
-#define ETH_RSS_GENEVE             (1ULL << 20)
-#define ETH_RSS_NVGRE              (1ULL << 21)
-#define ETH_RSS_GTPU               (1ULL << 23)
-#define ETH_RSS_ETH                (1ULL << 24)
-#define ETH_RSS_S_VLAN             (1ULL << 25)
-#define ETH_RSS_C_VLAN             (1ULL << 26)
-#define ETH_RSS_ESP                (1ULL << 27)
-#define ETH_RSS_AH                 (1ULL << 28)
-#define ETH_RSS_L2TPV3             (1ULL << 29)
-#define ETH_RSS_PFCP               (1ULL << 30)
-#define ETH_RSS_PPPOE		   (1ULL << 31)
-#define ETH_RSS_ECPRI		   (1ULL << 32)
-#define ETH_RSS_MPLS		   (1ULL << 33)
+#define RTE_ETH_RSS_IPV4               (1ULL << 2)
+#define ETH_RSS_IPV4		RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4          (1ULL << 3)
+#define ETH_RSS_FRAG_IPV4	RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
+#define ETH_RSS_NONFRAG_IPV4_TCP	RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
+#define ETH_RSS_NONFRAG_IPV4_UDP	RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP	RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER	RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6               (1ULL << 8)
+#define ETH_RSS_IPV6		RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6          (1ULL << 9)
+#define ETH_RSS_FRAG_IPV6	RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
+#define ETH_RSS_NONFRAG_IPV6_TCP	RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
+#define ETH_RSS_NONFRAG_IPV6_UDP	RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP	RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER	RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD         (1ULL << 14)
+#define ETH_RSS_L2_PAYLOAD	RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX            (1ULL << 15)
+#define ETH_RSS_IPV6_EX		RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
+#define ETH_RSS_IPV6_TCP_EX	RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
+#define ETH_RSS_IPV6_UDP_EX	RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT               (1ULL << 18)
+#define ETH_RSS_PORT		RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN              (1ULL << 19)
+#define ETH_RSS_VXLAN		RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE             (1ULL << 20)
+#define ETH_RSS_GENEVE		RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE              (1ULL << 21)
+#define ETH_RSS_NVGRE		RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU               (1ULL << 23)
+#define ETH_RSS_GTPU		RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH                (1ULL << 24)
+#define ETH_RSS_ETH		RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN             (1ULL << 25)
+#define ETH_RSS_S_VLAN		RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN             (1ULL << 26)
+#define ETH_RSS_C_VLAN		RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP                (1ULL << 27)
+#define ETH_RSS_ESP		RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH                 (1ULL << 28)
+#define ETH_RSS_AH		RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3             (1ULL << 29)
+#define ETH_RSS_L2TPV3		RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP               (1ULL << 30)
+#define ETH_RSS_PFCP		RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE              (1ULL << 31)
+#define ETH_RSS_PPPOE		RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI              (1ULL << 32)
+#define ETH_RSS_ECPRI		RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS               (1ULL << 33)
+#define ETH_RSS_MPLS		RTE_ETH_RSS_MPLS
 
 /*
  * We use the following macros to combine with above ETH_RSS_* for
@@ -547,12 +623,18 @@ struct rte_eth_rss_conf {
  * the same level are used simultaneously, it is the same case as none of
  * them are added.
  */
-#define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
-#define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
-#define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
-#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
-#define ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
-#define ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define RTE_ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
+#define ETH_RSS_L3_SRC_ONLY	RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY        (1ULL << 62)
+#define ETH_RSS_L3_DST_ONLY	RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
+#define ETH_RSS_L4_SRC_ONLY	RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY        (1ULL << 60)
+#define ETH_RSS_L4_DST_ONLY	RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
+#define ETH_RSS_L2_SRC_ONLY	RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define ETH_RSS_L2_DST_ONLY	RTE_ETH_RSS_L2_DST_ONLY
 
 /*
  * Only select IPV6 address prefix as RSS input set according to
@@ -580,22 +662,27 @@ struct rte_eth_rss_conf {
  * It basically stands for the innermost encapsulation level RSS
  * can be performed on according to PMD and device capabilities.
  */
-#define ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT	RTE_ETH_RSS_LEVEL_PMD_DEFAULT
 
 /**
  * level 1, requests RSS to be performed on the outermost packet
  * encapsulation level.
  */
-#define ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST	RTE_ETH_RSS_LEVEL_OUTERMOST
 
 /**
  * level 2, requests RSS to be performed on the specified inner packet
  * encapsulation level, from outermost to innermost (lower to higher values).
  */
-#define ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST	RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK	RTE_ETH_RSS_LEVEL_MASK
 
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf)	RTE_ETH_RSS_LEVEL(rss_hf)
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
@@ -619,213 +706,277 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)
 	return rss_hf;
 }
 
-#define ETH_RSS_IPV6_PRE32 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32	RTE_ETH_RSS_IPV6_PRE32
 
-#define ETH_RSS_IPV6_PRE40 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40	RTE_ETH_RSS_IPV6_PRE40
 
-#define ETH_RSS_IPV6_PRE48 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48	RTE_ETH_RSS_IPV6_PRE48
 
-#define ETH_RSS_IPV6_PRE56 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56	RTE_ETH_RSS_IPV6_PRE56
 
-#define ETH_RSS_IPV6_PRE64 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64	RTE_ETH_RSS_IPV6_PRE64
 
-#define ETH_RSS_IPV6_PRE96 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96	RTE_ETH_RSS_IPV6_PRE96
 
-#define ETH_RSS_IPV6_PRE32_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP	RTE_ETH_RSS_IPV6_PRE32_UDP
 
-#define ETH_RSS_IPV6_PRE40_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP	RTE_ETH_RSS_IPV6_PRE40_UDP
 
-#define ETH_RSS_IPV6_PRE48_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP	RTE_ETH_RSS_IPV6_PRE48_UDP
 
-#define ETH_RSS_IPV6_PRE56_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP	RTE_ETH_RSS_IPV6_PRE56_UDP
 
-#define ETH_RSS_IPV6_PRE64_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP	RTE_ETH_RSS_IPV6_PRE64_UDP
 
-#define ETH_RSS_IPV6_PRE96_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP	RTE_ETH_RSS_IPV6_PRE96_UDP
 
-#define ETH_RSS_IPV6_PRE32_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP	RTE_ETH_RSS_IPV6_PRE32_TCP
 
-#define ETH_RSS_IPV6_PRE40_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP	RTE_ETH_RSS_IPV6_PRE40_TCP
 
-#define ETH_RSS_IPV6_PRE48_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP	RTE_ETH_RSS_IPV6_PRE48_TCP
 
-#define ETH_RSS_IPV6_PRE56_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP	RTE_ETH_RSS_IPV6_PRE56_TCP
 
-#define ETH_RSS_IPV6_PRE64_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP	RTE_ETH_RSS_IPV6_PRE64_TCP
 
-#define ETH_RSS_IPV6_PRE96_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP	RTE_ETH_RSS_IPV6_PRE96_TCP
 
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP	RTE_ETH_RSS_IPV6_PRE32_SCTP
 
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP	RTE_ETH_RSS_IPV6_PRE40_SCTP
 
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP	RTE_ETH_RSS_IPV6_PRE48_SCTP
 
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP	RTE_ETH_RSS_IPV6_PRE56_SCTP
 
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP	RTE_ETH_RSS_IPV6_PRE64_SCTP
 
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
-	ETH_RSS_VXLAN  | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
-	ETH_RSS_S_VLAN  | \
-	ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP	RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP	RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP	RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP	RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP	RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+	RTE_ETH_RSS_VXLAN  | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL	RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+	RTE_ETH_RSS_S_VLAN  | \
+	RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN	RTE_ETH_RSS_VLAN
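
For RSS configuration the grouped macros combine as before; a minimal sketch requesting RSS over IP and UDP (capability checks against dev_info omitted):

	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
			},
		},
	};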
 
 /**< Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX | \
-	ETH_RSS_PORT  | \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE | \
-	ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX | \
+	RTE_ETH_RSS_PORT  | \
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE | \
+	RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK	RTE_ETH_RSS_PROTO_MASK
 
 /*
  * Definitions used for redirection table entry size.
  * Some RSS RETA sizes may not be supported by some drivers, check the
  * documentation or the description of relevant functions for more details.
  */
-#define ETH_RSS_RETA_SIZE_64  64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE   64
+#define RTE_ETH_RSS_RETA_SIZE_64  64
+#define ETH_RSS_RETA_SIZE_64	RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128	RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256	RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512	RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE   64
+#define RTE_RETA_GROUP_SIZE	RTE_ETH_RETA_GROUP_SIZE
 
 /* Definitions used for VMDQ and DCB functionality */
-#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
-#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS	RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES	RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES	RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES	RTE_ETH_DCB_NUM_QUEUES
 
 /* DCB capability defines */
-#define ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PG_SUPPORT	RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT	RTE_ETH_DCB_PFC_SUPPORT
 
 /* Definitions used for VLAN Offload functionality */
-#define ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD	RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD	RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD	RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD	RTE_ETH_QINQ_STRIP_OFFLOAD
 
 /* Definitions used for mask VLAN setting */
-#define ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
-#define ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
-#define ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
-#define ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
-#define ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip setting mask */
+#define ETH_VLAN_STRIP_MASK	RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter setting mask */
+#define ETH_VLAN_FILTER_MASK	RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend setting mask */
+#define ETH_VLAN_EXTEND_MASK	RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip setting mask */
+#define ETH_QINQ_STRIP_MASK	RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits */
+#define ETH_VLAN_ID_MAX		RTE_ETH_VLAN_ID_MAX
 
 /* Definitions used for receive MAC address   */
-#define ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define ETH_NUM_RECEIVE_MAC_ADDR	RTE_ETH_NUM_RECEIVE_MAC_ADDR
 
 /* Definitions used for unicast hash  */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY	RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
 
 /* Definitions used for VMDQ pool rx mode setting */
-#define ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG	RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_MC	RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC	RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST	RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST	RTE_ETH_VMDQ_ACCEPT_MULTICAST
 
 /** Maximum nb. of vlan per mirror rule */
-#define ETH_MIRROR_MAX_VLANS       64
+#define RTE_ETH_MIRROR_MAX_VLANS       64
+#define ETH_MIRROR_MAX_VLANS	RTE_ETH_MIRROR_MAX_VLANS
 
-#define ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
-#define ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
-#define ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
-#define ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
-#define ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_UP	RTE_ETH_MIRROR_VIRTUAL_POOL_UP
+#define RTE_ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
+#define ETH_MIRROR_UPLINK_PORT	RTE_ETH_MIRROR_UPLINK_PORT
+#define RTE_ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
+#define ETH_MIRROR_DOWNLINK_PORT	RTE_ETH_MIRROR_DOWNLINK_PORT
+#define RTE_ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
+#define ETH_MIRROR_VLAN		RTE_ETH_MIRROR_VLAN
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_DOWN	RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
 
 /**
  * A structure used to configure VLAN traffic mirror of an Ethernet port.
@@ -865,20 +1016,26 @@ struct rte_eth_rss_reta_entry64 {
  * in DCB configurations
  */
 enum rte_eth_nb_tcs {
-	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
-	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
+	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
 };
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
 
 /**
  * This enum indicates the possible number of queue pools
  * in VMDQ configurations.
  */
 enum rte_eth_nb_pools {
-	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
-	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
-	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
-	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
+	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
+	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
+	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
+	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
 };
+#define ETH_8_POOLS	RTE_ETH_8_POOLS
+#define ETH_16_POOLS	RTE_ETH_16_POOLS
+#define ETH_32_POOLS	RTE_ETH_32_POOLS
+#define ETH_64_POOLS	RTE_ETH_64_POOLS
 
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
@@ -964,7 +1121,7 @@ struct rte_eth_vmdq_rx_conf {
 struct rte_eth_txmode {
 	enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
 	/**
-	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -1048,7 +1205,7 @@ struct rte_eth_rxconf {
 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */
 	/**
-	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1077,7 +1234,7 @@ struct rte_eth_txconf {
 
 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	/**
-	 * Per-queue Tx offloads to be set  using DEV_TX_OFFLOAD_* flags.
+	 * Per-queue Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1188,12 +1345,17 @@ struct rte_eth_desc_lim {
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
-	RTE_FC_NONE = 0, /**< Disable flow control. */
-	RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
-	RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
-	RTE_FC_FULL      /**< Enable flow control on both side. */
+	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+	RTE_ETH_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
+	RTE_ETH_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
+	RTE_ETH_FC_FULL      /**< Enable flow control on both sides. */
 };
 
+#define RTE_FC_NONE	RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE	RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE	RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL	RTE_ETH_FC_FULL
+
 /**
  * A structure used to configure Ethernet flow control parameter.
  * These parameters will be configured into the register of the NIC.
@@ -1224,18 +1386,29 @@ struct rte_eth_pfc_conf {
  * @see rte_eth_udp_tunnel
  */
 enum rte_eth_tunnel_type {
-	RTE_TUNNEL_TYPE_NONE = 0,
-	RTE_TUNNEL_TYPE_VXLAN,
-	RTE_TUNNEL_TYPE_GENEVE,
-	RTE_TUNNEL_TYPE_TEREDO,
-	RTE_TUNNEL_TYPE_NVGRE,
-	RTE_TUNNEL_TYPE_IP_IN_GRE,
-	RTE_L2_TUNNEL_TYPE_E_TAG,
-	RTE_TUNNEL_TYPE_VXLAN_GPE,
-	RTE_TUNNEL_TYPE_ECPRI,
-	RTE_TUNNEL_TYPE_MAX,
+	RTE_ETH_TUNNEL_TYPE_NONE = 0,
+	RTE_ETH_TUNNEL_TYPE_VXLAN,
+	RTE_ETH_TUNNEL_TYPE_GENEVE,
+	RTE_ETH_TUNNEL_TYPE_TEREDO,
+	RTE_ETH_TUNNEL_TYPE_NVGRE,
+	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_ETH_TUNNEL_TYPE_ECPRI,
+	RTE_ETH_TUNNEL_TYPE_MAX,
 };
 
+#define RTE_TUNNEL_TYPE_NONE		RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN		RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE		RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO		RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE		RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG	RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI		RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX		RTE_ETH_TUNNEL_TYPE_MAX
+
 /* Deprecated API file for rte_eth_dev_filter_* functions */
 #include "rte_eth_ctrl.h"
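
The tunnel enum renames line up with rte_eth_dev_udp_tunnel_port_add(); a minimal sketch registering the IANA-assigned VXLAN port, assuming port_id is valid:

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};
	int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);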
 
@@ -1243,11 +1416,16 @@ enum rte_eth_tunnel_type {
  *  Memory space that can be configured to store Flow Director filters
  *  in the board memory.
  */
-enum rte_fdir_pballoc_type {
-	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
-	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
-	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+	RTE_ETH_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+	RTE_ETH_FDIR_PBALLOC_128K,     /**< 128k. */
+	RTE_ETH_FDIR_PBALLOC_256K,     /**< 256k. */
 };
+#define rte_fdir_pballoc_type	rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K	RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K	RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K	RTE_ETH_FDIR_PBALLOC_256K
 
 /**
  *  Select report mode of FDIR hash information in RX descriptors.
@@ -1264,9 +1442,9 @@ enum rte_fdir_status_mode {
  *
  * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
  */
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
 	enum rte_fdir_mode mode; /**< Flow Director mode. */
-	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+	enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
 	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
 	/** RX queue of packets matching a "drop" filter in perfect mode. */
 	uint8_t drop_queue;
@@ -1275,6 +1453,8 @@ struct rte_fdir_conf {
 	/**< Flex payload configuration. */
 };
 
+#define rte_fdir_conf rte_eth_fdir_conf
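
Because the alias here is a plain #define on the tag name, even legacy struct declarations keep compiling unchanged; a sketch:

	/* The preprocessor rewrites the tag, so this names the new struct. */
	struct rte_fdir_conf fconf = { .drop_queue = 0 };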
+
 /**
  * UDP tunneling configuration.
  *
@@ -1292,7 +1472,7 @@ struct rte_eth_udp_tunnel {
 /**
  * A structure used to enable/disable specific device interrupts.
  */
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint32_t lsc:1;
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
@@ -1301,6 +1481,8 @@ struct rte_intr_conf {
 	uint32_t rmv:1;
 };
 
+#define rte_intr_conf rte_eth_intr_conf
+
 /**
  * A structure used to configure an Ethernet port.
  * Depending upon the RX multi-queue mode, extra advanced
@@ -1348,39 +1530,60 @@ struct rte_eth_conf {
 /**
  * RX offload capabilities of a device.
  */
-#define DEV_RX_OFFLOAD_VLAN_STRIP  0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT	0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER	0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND	0x00000400
-#define DEV_RX_OFFLOAD_JUMBO_FRAME	0x00000800
-#define DEV_RX_OFFLOAD_SCATTER		0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP  0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP	RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM	RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO     0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO		RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP  0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP	RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP	RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT	0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT	RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER	0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER	RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND	0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	0x00000800
+#define DEV_RX_OFFLOAD_JUMBO_FRAME	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME
+#define RTE_ETH_RX_OFFLOAD_SCATTER	0x00002000
+#define DEV_RX_OFFLOAD_SCATTER		RTE_ETH_RX_OFFLOAD_SCATTER
 /**
  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_RX_OFFLOAD_TIMESTAMP	0x00004000
-#define DEV_RX_OFFLOAD_SECURITY         0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC		0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM	0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH		0x00080000
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP	0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP	RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY     0x00008000
+#define DEV_RX_OFFLOAD_SECURITY		RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC	0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC		RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH	0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH	RTE_ETH_RX_OFFLOAD_RSS_HASH
 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
 
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				 DEV_RX_OFFLOAD_UDP_CKSUM | \
-				 DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			     DEV_RX_OFFLOAD_VLAN_FILTER | \
-			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-			     DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM	RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN	RTE_ETH_RX_OFFLOAD_VLAN
 
 /*
  * If new Rx offload capabilities are defined, they also must be
@@ -1390,52 +1593,74 @@ struct rte_eth_conf {
 /**
  * TX offload capabilities of a device.
  */
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM  0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO     0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO     0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
-#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT 0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT	RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM	RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO     0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO		RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO     0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO		RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT 0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT	RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT	RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
 /**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
  * tx queue without SW lock.
  */
-#define DEV_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS	RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 /**< Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 /**< Device supports optimization for fast release of mbufs.
 *   When set, the application must guarantee that, per queue, all mbufs come
 *   from the same mempool and have refcnt = 1.
  */
-#define DEV_TX_OFFLOAD_SECURITY         0x00020000
+#define RTE_ETH_TX_OFFLOAD_SECURITY         0x00020000
+#define DEV_TX_OFFLOAD_SECURITY	RTE_ETH_TX_OFFLOAD_SECURITY
 /**
  * Device supports generic UDP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO	RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
 /**
  * Device supports generic IP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
 /** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
 /**
  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP	RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
@@ -1672,8 +1897,10 @@ struct rte_eth_xstat_name {
 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
 };
 
-#define ETH_DCB_NUM_TCS    8
-#define ETH_MAX_VMDQ_POOL  64
+#define RTE_ETH_DCB_NUM_TCS    8
+#define ETH_DCB_NUM_TCS	RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL  64
+#define ETH_MAX_VMDQ_POOL	RTE_ETH_MAX_VMDQ_POOL
 
 /**
  * A structure used to get the information of queue and
@@ -1749,13 +1976,17 @@ struct rte_eth_fec_capa {
  */
 
 /**< l2 tunnel enable mask */
-#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define RTE_ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define ETH_L2_TUNNEL_ENABLE_MASK	RTE_ETH_L2_TUNNEL_ENABLE_MASK
 /**< l2 tunnel insertion mask */
-#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define RTE_ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define ETH_L2_TUNNEL_INSERTION_MASK	RTE_ETH_L2_TUNNEL_INSERTION_MASK
 /**< l2 tunnel stripping mask */
-#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define RTE_ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define ETH_L2_TUNNEL_STRIPPING_MASK	RTE_ETH_L2_TUNNEL_STRIPPING_MASK
 /**< l2 tunnel forwarding mask */
-#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define RTE_ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define ETH_L2_TUNNEL_FORWARDING_MASK	RTE_ETH_L2_TUNNEL_FORWARDING_MASK
 
 /**
  * Function type used for RX packet processing packet callbacks.
@@ -2075,7 +2306,7 @@ uint16_t rte_eth_dev_count_total(void);
 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 
 /**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2085,7 +2316,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
 
 /**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2179,7 +2410,7 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
- *   the DEV_RX_OFFLOAD_* flags.
+ *   the RTE_ETH_RX_OFFLOAD_* flags.
 *   If an offload set in rx_conf->offloads
 *   hasn't been set in the input argument eth_conf->rxmode.offloads
 *   to rte_eth_dev_configure(), it is a newly added offload and must be
@@ -5231,7 +5462,7 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
  * of those packets whose transmission was effectively completed.
  *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
  * invoke this function concurrently on the same tx queue without SW lock.
  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
  *
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index edf96de2dc2e..8e6156a62aa9 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -154,7 +154,7 @@ struct rte_eth_dev_data {
 			/**< Device Ethernet link address.
 			 *   @see rte_eth_dev_release_port()
 			 */
-	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+	uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
 			/**< Bitmap associating MAC addresses to pools. */
 	struct rte_ether_addr *hash_mac_addrs;
 			/**< Device Ethernet MAC addresses of hash filtering.
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index 70f455d47d60..4152067368b8 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -2593,7 +2593,7 @@ struct rte_flow_action_rss {
 	 * through.
 	 */
 	uint32_t level;
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len; /**< Hash key length in bytes. */
 	uint32_t queue_num; /**< Number of entries in @p queue. */
 	const uint8_t *key; /**< Hash key. */
diff --git a/lib/gso/rte_gso.c b/lib/gso/rte_gso.c
index 0d02ec3cee05..119fdcac0b7f 100644
--- a/lib/gso/rte_gso.c
+++ b/lib/gso/rte_gso.c
@@ -15,13 +15,13 @@
 #include "gso_udp4.h"
 
 #define ILLEGAL_UDP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+	((((ctx)->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0) || \
 	 (ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
 
 #define ILLEGAL_TCP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+	((((ctx)->gso_types & (RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
 		(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
 
 int
@@ -54,28 +54,28 @@ rte_gso_segment(struct rte_mbuf *pkt,
 	ol_flags = pkt->ol_flags;
 
 	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
 			((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
-			 (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+			 (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_tunnel_udp4_segment(pkt, gso_size,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_UDP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_udp4_segment(pkt, gso_size, direct_pool,
 				indirect_pool, pkts_out, nb_pkts_out);
diff --git a/lib/gso/rte_gso.h b/lib/gso/rte_gso.h
index d93ee8e5b171..0a65afc11e64 100644
--- a/lib/gso/rte_gso.h
+++ b/lib/gso/rte_gso.h
@@ -52,11 +52,11 @@ struct rte_gso_ctx {
 	uint32_t gso_types;
 	/**< the bit mask of required GSO types. The GSO library
 	 * uses the same macros as that of describing device TX
-	 * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+	 * offloading capabilities (i.e. RTE_ETH_TX_OFFLOAD_*_TSO) for
 	 * gso_types.
 	 *
 	 * For example, if applications want to segment TCP/IPv4
-	 * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+	 * packets, set RTE_ETH_TX_OFFLOAD_TCP_TSO in gso_types.
 	 */
 	uint16_t gso_size;
 	/**< maximum size of an output GSO segment, including packet
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index bb38d7f58102..50e611e887bf 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -192,7 +192,7 @@ extern "C" {
  * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
 * HW capability. At minimum, the PMD should support
  * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
- * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ * if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
  */
 #define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))
 
@@ -215,7 +215,7 @@ extern "C" {
  * a) Fill outer_l2_len and outer_l3_len in mbuf.
  * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
  * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
- * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * 2) Configure RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
  */
 #define PKT_TX_OUTER_UDP_CKSUM     (1ULL << 41)
 
@@ -258,7 +258,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
  * or PKT_TX_TUNNEL_IPIP if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
@@ -271,7 +271,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
  * if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
diff --git a/lib/mbuf/rte_mbuf_dyn.h b/lib/mbuf/rte_mbuf_dyn.h
index 13f06d8ed25b..be43f8c328e1 100644
--- a/lib/mbuf/rte_mbuf_dyn.h
+++ b/lib/mbuf/rte_mbuf_dyn.h
@@ -37,7 +37,7 @@
  *   of the dynamic field to be registered:
  *   const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... };
  * - The application initializes the PMD, and asks for this feature
- *   at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in
+ *   at port initialization by passing RTE_ETH_RX_OFFLOAD_MY_FEATURE in
 *   rxconf. This will make the PMD register the field by calling
  *   rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD
  *   stores the returned offset.
-- 
2.31.1
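
To see the compatibility scheme from the application side, here is a minimal
sketch (not part of the submission) using only names defined in the patch
above: each legacy DEV_*/ETH_* spelling is a plain alias of its RTE_ETH_*
counterpart, so both forms compile to identical values during the transition.

#include <rte_ethdev.h>

/* Minimal sketch, not part of the patch: old and new offload names are
 * interchangeable while the compatibility macros exist. */
static const struct rte_eth_conf example_port_conf = {
	.rxmode = {
		/* New, namespaced spellings. */
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
			    RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
	},
	.txmode = {
		/* A legacy spelling still compiles to the same bit. */
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	},
};

/* Compile-time proof that an alias expands to the new name. */
_Static_assert(DEV_RX_OFFLOAD_KEEP_CRC == RTE_ETH_RX_OFFLOAD_KEEP_CRC,
	       "compat alias must expand to the new name");

/* Runtime use of a renamed capability bit, e.g. before calling
 * rte_eth_tx_burst() concurrently on one queue (see the doc update above). */
static int
example_is_mt_lockfree(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	return (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE) != 0;
}

Once the compatibility macros are removed in the next LTS, only the RTE_ETH_*
spellings remain and the legacy line above would need the one-token rename.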


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-27  1:19 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
@ 2021-08-27  7:59   ` Andrew Rybchenko
  2021-08-27 20:24     ` Ferruh Yigit
  2021-08-28 14:26     ` Ajit Khaparde
  2021-08-30  9:41   ` David Marchand
  2021-08-30 17:19   ` [dpdk-dev] [PATCH v3] " Ferruh Yigit
  2 siblings, 2 replies; 32+ messages in thread
From: Andrew Rybchenko @ 2021-08-27  7:59 UTC (permalink / raw)
  To: Ferruh Yigit, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko,
	Keith Wiles, Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal,
	Declan Doherty, Ray Kinsella, Radu Nicolau, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Ciara Loftus,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang
  Cc: dev, Tyler Retzlaff

On 8/27/21 4:19 AM, Ferruh Yigit wrote:
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> way. The macros for backward compatibility can be removed in next LTS.
> 
> Internal components switched to new enum & macro names.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>

Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>

I think that it should be pushed as early as possible in
the release cycle.

The changeset definitely deserves entry in release notes.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-27  7:59   ` Andrew Rybchenko
@ 2021-08-27 20:24     ` Ferruh Yigit
  2021-08-28 14:26     ` Ajit Khaparde
  1 sibling, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-08-27 20:24 UTC (permalink / raw)
  To: Andrew Rybchenko, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko,
	Keith Wiles, Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal,
	Declan Doherty, Ray Kinsella, Radu Nicolau, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Ciara Loftus,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang
  Cc: dev, Tyler Retzlaff

On 8/27/2021 8:59 AM, Andrew Rybchenko wrote:
> On 8/27/21 4:19 AM, Ferruh Yigit wrote:
>> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
>> way. The macros for backward compatibility can be removed in next LTS.
>>
>> Internal components switched to new enum & macro names.
>>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
> 
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> 
> I think that it should be pushed as early as possible in
> the release cycle.
> 

ack


> The changeset definitely deserves entry in release notes.

and ack. I will send a new version with release notes update on Monday.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-27  7:59   ` Andrew Rybchenko
  2021-08-27 20:24     ` Ferruh Yigit
@ 2021-08-28 14:26     ` Ajit Khaparde
  2021-08-29  7:47       ` Jerin Jacob
  2021-08-29  8:17       ` Wisam Monther
  1 sibling, 2 replies; 32+ messages in thread
From: Ajit Khaparde @ 2021-08-28 14:26 UTC (permalink / raw)
  To: Andrew Rybchenko
  Cc: Ferruh Yigit, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Somnath Kotur, John Daley,
	Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing, Haiyue Wang,
	Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Somalapuram Amaranath, Rasesh Mody, Shahed Shaikh,
	Bruce Richardson, Konstantin Ananyev, Ruifeng Wang,
	Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk, Shai Brandes,
	Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh, Gaetan Rivet,
	Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou,
	Jingjing Wu, Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang, dpdk-dev, Tyler Retzlaff


On Fri, Aug 27, 2021 at 12:59 AM Andrew Rybchenko
<andrew.rybchenko@oktetlabs.ru> wrote:
>
> On 8/27/21 4:19 AM, Ferruh Yigit wrote:
> > Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> > way. The macros for backward compatibility can be removed in next LTS.
> >
> > Internal components switched to new enum & macro names.
> >
> > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>

>
> I think that it should be pushed as early as possible in
> the release cycle.
>
> The changeset definitely deserves entry in release notes.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-28 14:26     ` Ajit Khaparde
@ 2021-08-29  7:47       ` Jerin Jacob
  2021-08-29  8:17       ` Wisam Monther
  1 sibling, 0 replies; 32+ messages in thread
From: Jerin Jacob @ 2021-08-29  7:47 UTC (permalink / raw)
  To: Ajit Khaparde
  Cc: Andrew Rybchenko, Ferruh Yigit, Maryam Tahhan, Reshma Pattan,
	Jerin Jacob, Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li,
	Thomas Monjalon, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Somnath Kotur, John Daley,
	Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing, Haiyue Wang,
	Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Somalapuram Amaranath, Rasesh Mody, Shahed Shaikh,
	Bruce Richardson, Konstantin Ananyev, Ruifeng Wang,
	Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk, Shai Brandes,
	Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh, Gaetan Rivet,
	Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou,
	Jingjing Wu, Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang, dpdk-dev, Tyler Retzlaff

On Sat, Aug 28, 2021 at 7:56 PM Ajit Khaparde
<ajit.khaparde@broadcom.com> wrote:
>
> On Fri, Aug 27, 2021 at 12:59 AM Andrew Rybchenko
> <andrew.rybchenko@oktetlabs.ru> wrote:
> >
> > On 8/27/21 4:19 AM, Ferruh Yigit wrote:
> > > Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> > > way. The macros for backward compatibility can be removed in next LTS.
> > >
> > > Internal components switched to new enum & macro names.
> > >
> > > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > > Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
> >
> > Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>

Acked-by: Jerin Jacob <jerinj@marvell.com>

>
> >
> > I think that it should be pushed as early as possible in
> > the release cycle.
> >
> > The changeset definitely deserves entry in release notes.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-28 14:26     ` Ajit Khaparde
  2021-08-29  7:47       ` Jerin Jacob
@ 2021-08-29  8:17       ` Wisam Monther
  2021-08-30  2:13         ` Xu, Rosen
  2021-08-30  6:24         ` Hemant Agrawal
  1 sibling, 2 replies; 32+ messages in thread
From: Wisam Monther @ 2021-08-29  8:17 UTC (permalink / raw)
  To: Ajit Khaparde, Andrew Rybchenko
  Cc: Ferruh Yigit, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Cristian Dumitrescu, Xiaoyun Li, NBU-Contact-Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Somnath Kotur, John Daley,
	Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing, Haiyue Wang,
	Matan Azrad, Shahaf Shuler, Slava Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Somalapuram Amaranath, Rasesh Mody, Shahed Shaikh,
	Bruce Richardson, Konstantin Ananyev, Ruifeng Wang,
	Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk, Shai Brandes,
	Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh, Gaetan Rivet,
	Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou,
	Jingjing Wu, Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, NBU-Contact-longli,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang,
	dpdk-dev, Tyler Retzlaff

Hi,

> 
> On Fri, Aug 27, 2021 at 12:59 AM Andrew Rybchenko
> <andrew.rybchenko@oktetlabs.ru> wrote:
> >
> > On 8/27/21 4:19 AM, Ferruh Yigit wrote:
> > > Add 'RTE_ETH' namespace to all enums & macros in a backward
> compatible
> > > way. The macros for backward compatibility can be removed in next LTS.
> > >
> > > Internal components switched to new enum & macro names.
> > >
> > > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > > Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
> >
> > Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>

Acked-by: Wisam Jaddo <wisamm@nvidia.com>

> 
> >
> > I think that it should be pushed as early as possible in
> > the release cycle.
> >
> > The changeset definitely deserves entry in release notes.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-29  8:17       ` Wisam Monther
@ 2021-08-30  2:13         ` Xu, Rosen
  2021-08-30  5:27           ` Xia, Chenbo
  2021-08-30  6:24         ` Hemant Agrawal
  1 sibling, 1 reply; 32+ messages in thread
From: Xu, Rosen @ 2021-08-30  2:13 UTC (permalink / raw)
  To: Wisam Monther, Ajit Khaparde, Andrew Rybchenko
  Cc: Yigit, Ferruh, Tahhan, Maryam, Pattan, Reshma, Jerin Jacob,
	Dumitrescu, Cristian, Li, Xiaoyun, NBU-Contact-Thomas Monjalon,
	Jayatheerthan, Jay, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Somnath Kotur, Daley, John,
	Hyong Youb Kim, Zhang, Qi Z, Wang, Xiao W, Xing, Beilei, Wang,
	Haiyue, Matan Azrad, Shahaf Shuler, Slava Ovsiienko, Wiles,
	Keith, Hu, Jiayu, Olivier Matz, Ori Kam, Akhil Goyal, Doherty,
	Declan, Ray Kinsella, Nicolau, Radu, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Loftus, Ciara,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Richardson, Bruce, Ananyev, Konstantin,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Wu, Jingjing, Yang, Qiming, Andrew Boyer,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, NBU-Contact-longli,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Singh, Jasvinder, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Xia, Chenbo, Yong Wang, Chautru, Nicolas, Hunt,
	David, Van Haaren, Harry, Iremonger, Bernard, Burakov, Anatoly,
	Mcnamara, John, Rybalchenko, Kirill, Marohn, Byron, Wang,
	Yipeng1, dpdk-dev, Tyler Retzlaff

Hi,

> 
> Hi,
> 
> >
> > On Fri, Aug 27, 2021 at 12:59 AM Andrew Rybchenko
> > <andrew.rybchenko@oktetlabs.ru> wrote:
> > >
> > > On 8/27/21 4:19 AM, Ferruh Yigit wrote:
> > > > Add 'RTE_ETH' namespace to all enums & macros in a backward
> > compatible
> > > > way. The macros for backward compatibility can be removed in next LTS.
> > > >
> > > > Internal components switched to new enum & macro names.
> > > >
> > > > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > > > Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > >
> > > Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> > Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> 
> Acked-by: Wisam Jaddo <wisamm@nvidia.com>

Acked-by: Rosen Xu <rosen.xu@intel.com>

> >
> > >
> > > I think that it should be pushed as early as possible in the release
> > > cycle.
> > >
> > > The changeset definitely deserves entry in release notes.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-30  2:13         ` Xu, Rosen
@ 2021-08-30  5:27           ` Xia, Chenbo
  0 siblings, 0 replies; 32+ messages in thread
From: Xia, Chenbo @ 2021-08-30  5:27 UTC (permalink / raw)
  To: Yigit, Ferruh
  Cc: Tahhan, Maryam, Pattan, Reshma, Jerin Jacob, Dumitrescu,
	Cristian, Li, Xiaoyun, NBU-Contact-Thomas Monjalon,
	Jayatheerthan, Jay, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Somnath Kotur, Daley, John,
	Hyong Youb Kim, Zhang, Qi Z, Wang, Xiao W, Xing, Beilei, Wang,
	Haiyue, Matan Azrad, Shahaf Shuler, Slava Ovsiienko, Wiles,
	Keith, Hu, Jiayu, Olivier Matz, Ori Kam, Akhil Goyal, Doherty,
	Declan, Ray Kinsella, Nicolau, Radu, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Xu, Rosen, Wisam Monther, Ajit Khaparde,
	Andrew Rybchenko, Satha Rao, John W. Linville, Loftus, Ciara,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Richardson, Bruce, Ananyev, Konstantin,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Wu, Jingjing, Yang, Qiming, Andrew Boyer,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, NBU-Contact-longli,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Singh, Jasvinder, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Yong Wang, Chautru, Nicolas, Hunt,  David,
	Van Haaren, Harry, Iremonger, Bernard, Burakov, Anatoly,
	Mcnamara, John, Rybalchenko, Kirill, Marohn, Byron, Wang,
	Yipeng1, dpdk-dev, Tyler Retzlaff

> -----Original Message-----
> From: Xu, Rosen <rosen.xu@intel.com>
> > > On Fri, Aug 27, 2021 at 12:59 AM Andrew Rybchenko
> > > <andrew.rybchenko@oktetlabs.ru> wrote:
> > > >
> > > > On 8/27/21 4:19 AM, Ferruh Yigit wrote:
> > > > > Add 'RTE_ETH' namespace to all enums & macros in a backward
> > > compatible
> > > > > way. The macros for backward compatibility can be removed in next LTS.
> > > > >
> > > > > Internal components switched to new enum & macro names.
> > > > >
> > > > > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > > > > Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > > >
> > > > Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> > > Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> >
> > Acked-by: Wisam Jaddo <wisamm@nvidia.com>
> 
> Acked-by: Rosen Xu <rosen.xu@intel.com>

For virtio/vhost part:

Acked-by: Chenbo Xia <chenbo.xia@intel.com>

> 
> > >
> > > >
> > > > I think that it should be pushed as early as possible in the release
> > > > cycle.
> > > >
> > > > The changeset definitely deserves entry in release notes.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-29  8:17       ` Wisam Monther
  2021-08-30  2:13         ` Xu, Rosen
@ 2021-08-30  6:24         ` Hemant Agrawal
  1 sibling, 0 replies; 32+ messages in thread
From: Hemant Agrawal @ 2021-08-30  6:24 UTC (permalink / raw)
  To: Wisam Monther, Ajit Khaparde, Andrew Rybchenko
  Cc: Ferruh Yigit, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Cristian Dumitrescu, Xiaoyun Li, NBU-Contact-Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Somnath Kotur, John Daley,
	Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing, Haiyue Wang,
	Matan Azrad, Shahaf Shuler, Slava Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Sachin Saxena (OSS),
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Somalapuram Amaranath, Rasesh Mody, Shahed Shaikh,
	Bruce Richardson, Konstantin Ananyev, Ruifeng Wang,
	Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk, Shai Brandes,
	Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh, Gaetan Rivet,
	Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou,
	Jingjing Wu, Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, NBU-Contact-longli,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang,
	dpdk-dev, Tyler Retzlaff

> > On Fri, Aug 27, 2021 at 12:59 AM Andrew Rybchenko
> > <andrew.rybchenko@oktetlabs.ru> wrote:
> > >
> > > On 8/27/21 4:19 AM, Ferruh Yigit wrote:
> > > > Add 'RTE_ETH' namespace to all enums & macros in a backward
> > compatible
> > > > way. The macros for backward compatibility can be removed in next
> LTS.
> > > >
> > > > Internal components switched to new enum & macro names.
> > > >
> > > > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > > > Acked-By: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > >
> > > Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> > Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> 
> Acked-by: Wisam Jaddo <wisamm@nvidia.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> 
> >
> > >
> > > I think that it should be pushed as early as possible in the release
> > > cycle.
> > >
> > > The changeset definitely deserves entry in release notes.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-27  1:19 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
  2021-08-27  7:59   ` Andrew Rybchenko
@ 2021-08-30  9:41   ` David Marchand
  2021-08-30 17:01     ` Ferruh Yigit
  2021-08-30 17:19   ` [dpdk-dev] [PATCH v3] " Ferruh Yigit
  2 siblings, 1 reply; 32+ messages in thread
From: David Marchand @ 2021-08-30  9:41 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko,
	Keith Wiles, Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal,
	Declan Doherty, Ray Kinsella, Radu Nicolau, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Ciara Loftus,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang,
	dev, Tyler Retzlaff

Hi Ferruh,

On Fri, Aug 27, 2021 at 3:26 AM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible

For completeness, there are some struct renames too in this patch.


> way. The macros for backward compatibility can be removed in next LTS.

Olivier had provided a script when he prefixed librte_net macros.
https://inbox.dpdk.org/dev/20190529144602.5tpfb5p3yasz3tvl@platinum/

Did you use a script to do those renames and can you share it?
Or maybe we can simply reuse Olivier script.


>
> Internal components switched to new enum & macro names.

I found remaining references for macros and structs.
Enums look fine.

With this patch applied:

# For structs:
$ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |awk '/^-struct / {print $2}' |sort -u); do git grep -w $word |grep -v '#define '$word'[[:space:]]'; done
lib/ethdev/rte_ethdev.h:    struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
lib/ethdev/rte_ethdev.h:    struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */

# Enums look fine:
$ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |awk '/^-enum / {print $2}' |sort -u); do git grep -w $word |grep -v '#define '$word'[[:space:]]'; done
$ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |awk '/^-\t(RTE|ETH)_[^ ,]*,/ {print $2}' |sort -u); do word=${word%%,}; git grep -w $word |grep -v '#define '$word'[[:space:]]'; done
$ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |awk '/^-\t(RTE|ETH)_[^ ,]* = / {print $2}' |sort -u); do git grep -w $word |grep -v '#define '$word'[[:space:]]'; done

# For macros:
$ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |awk '/^-#define / {print $2}' |sort -u); do git grep -w $word |grep -v '#define '$word'[[:space:]]' && echo; done
doc/guides/rel_notes/release_18_11.rst:  To request keeping CRC, application should set ``DEV_RX_OFFLOAD_KEEP_CRC``

doc/guides/rel_notes/release_19_11.rst:  * Added new Rx offload flag ``DEV_RX_OFFLOAD_RSS_HASH`` which can be used to

lib/ethdev/rte_ethdev.h:    } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
lib/ethdev/rte_ethdev.h:    } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
lib/ethdev/rte_ethdev.h:    uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */

lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
lib/ethdev/rte_ethdev.h:    uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */

[snipped the rest of the output] etc...


-- 
David Marchand
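
The struct-dimension hits above also show why each compatibility alias must
expand to exactly the same constant: fields in rte_ethdev.h are dimensioned
with the old macros, so the alias keeps array sizes (and thus the ABI)
unchanged. A standalone sketch of this — the macro values mirror the patch,
the struct itself is purely illustrative:

#include <stdint.h>

/* Standalone mock of the aliasing in the patch; not DPDK code itself. */
#define RTE_ETH_DCB_NUM_TCS	8
#define ETH_DCB_NUM_TCS		RTE_ETH_DCB_NUM_TCS	/* compat alias */

struct example_dcb_info {
	/* Dimensioned with the legacy name: still 8 entries after the rename. */
	uint8_t tc_bws[ETH_DCB_NUM_TCS];
};

/* Size is identical under either spelling of the macro. */
_Static_assert(sizeof(struct example_dcb_info) == RTE_ETH_DCB_NUM_TCS,
	       "legacy and namespaced macros must agree");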


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: add namespace
  2021-08-30  9:41   ` David Marchand
@ 2021-08-30 17:01     ` Ferruh Yigit
  0 siblings, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-08-30 17:01 UTC (permalink / raw)
  To: David Marchand
  Cc: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko,
	Keith Wiles, Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal,
	Declan Doherty, Ray Kinsella, Radu Nicolau, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Ciara Loftus,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang,
	dev, Tyler Retzlaff

On 8/30/2021 10:41 AM, David Marchand wrote:
> Hi Ferruh,
> 
> On Fri, Aug 27, 2021 at 3:26 AM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>>
>> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> 
> For completeness, there are some struct renames too in this patch.
> 

Ack, will add that to the commit log. I am also fixing some checkpatch
warnings and will note that in the commit log as well.

> 
>> way. The macros for backward compatibility can be removed in next LTS.
> 
> Olivier had provided a script when he prefixed librte_net macros.
> https://inbox.dpdk.org/dev/20190529144602.5tpfb5p3yasz3tvl@platinum/
> 
> Did you use a script to do those renames, and can you share it?
> Or maybe we can simply reuse Olivier's script.
> 

Using Olivier's script would have been a good idea, but unfortunately I did the renames manually.
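
If it helps for next time, the mechanical part could be scripted in the same
spirit as Olivier's. A minimal sketch (hypothetical; it assumes a
hand-maintained rename map and is not Olivier's actual script):

  #!/bin/sh
  # rename-map.txt holds one "OLD_NAME NEW_NAME" pair per line,
  # e.g. "ETH_MQ_TX_NONE RTE_ETH_MQ_TX_NONE".
  while read -r old new; do
          # Rename whole words only, across the tree,
          # skipping the map file itself.
          git grep -lw "$old" -- ':!rename-map.txt' |
                  xargs -r sed -i "s/\b$old\b/$new/g"
  done < rename-map.txt

That would leave only comment wording and line re-wrapping to fix by hand.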

> 
>>
>> Internal components switched to new enum & macro names.
> 
> I found remaining references to the old macros and structs.
> Enums look fine.
> 

Thanks for catching these, I am updating the patch to fix them.
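
For the leftovers quoted below, replaying the same kind of mapping on the
matching files should be enough; e.g. a hypothetical spot-fix for the DCB
macros (assuming the new names follow the same RTE_ETH_ prefix pattern):

  $ git grep -lw ETH_DCB_NUM_TCS -- lib/ doc/ |
          xargs -r sed -i 's/\bETH_DCB_NUM_TCS\b/RTE_ETH_DCB_NUM_TCS/g'

Whether the historical release notes should be rewritten as well is a
separate question.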

> With this patch applied:
> 
> # For structs:
> $ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |
>         awk '/^-struct / {print $2}' | sort -u); do
>         git grep -w $word | grep -v '#define '$word'[[:space:]]'
>   done
> lib/ethdev/rte_ethdev.h:    struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
> lib/ethdev/rte_ethdev.h:    struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
>
> # Enums look fine:
> $ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |
>         awk '/^-enum / {print $2}' | sort -u); do
>         git grep -w $word | grep -v '#define '$word'[[:space:]]'
>   done
> $ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |
>         awk '/^-\t(RTE|ETH)_[^ ,]*,/ {print $2}' | sort -u); do
>         word=${word%%,}
>         git grep -w $word | grep -v '#define '$word'[[:space:]]'
>   done
> $ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |
>         awk '/^-\t(RTE|ETH)_[^ ,]* = / {print $2}' | sort -u); do
>         git grep -w $word | grep -v '#define '$word'[[:space:]]'
>   done
>
> # For macros:
> $ for word in $(git show -- 'lib/ethdev/rte_ethdev.h' |
>         awk '/^-#define / {print $2}' | sort -u); do
>         git grep -w $word | grep -v '#define '$word'[[:space:]]' && echo
>   done
> doc/guides/rel_notes/release_18_11.rst:  To request keeping CRC, application should set ``DEV_RX_OFFLOAD_KEEP_CRC``
>
> doc/guides/rel_notes/release_19_11.rst:  * Added new Rx offload flag ``DEV_RX_OFFLOAD_RSS_HASH`` which can be used to
>
> lib/ethdev/rte_ethdev.h:    } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
> lib/ethdev/rte_ethdev.h:    } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
> lib/ethdev/rte_ethdev.h:    uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
>
> lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
> lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
> lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
> lib/ethdev/rte_ethdev.h:    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
> lib/ethdev/rte_ethdev.h:    uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
> 
> [snipped the rest of the output]
> 
> 


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v3] ethdev: add namespace
  2021-08-27  1:19 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
  2021-08-27  7:59   ` Andrew Rybchenko
  2021-08-30  9:41   ` David Marchand
@ 2021-08-30 17:19   ` Ferruh Yigit
  2021-08-31  7:59     ` Thomas Monjalon
  2021-10-18 15:43     ` [dpdk-dev] [PATCH v4] " Ferruh Yigit
  2 siblings, 2 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-08-30 17:19 UTC (permalink / raw)
  To: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Haiyue Wang,
	Beilei Xing, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko,
	Keith Wiles, Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal,
	Declan Doherty, Ray Kinsella, Radu Nicolau, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Ciara Loftus,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang
  Cc: Ferruh Yigit, dev, Tyler Retzlaff, David Marchand

Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
way. The macros for backward compatibility can be removed in next LTS.
Also updated some struct names to have the 'rte_eth' prefix.

All internal components switched to the new names.

Syntax fixed on the lines that this patch touches.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Wisam Jaddo <wisamm@nvidia.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
Cc: David Marchand <david.marchand@redhat.com>

v2:
* Updated internal components
* Removed deprecation notice

v3:
* Updated missing macros / structs that David highlighted
* Added release notes update
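
As a quick sanity check on the compatibility layer, the aliases this patch
adds can be listed straight from the diff, e.g. (illustrative command, not
part of the patch):

  $ git show -- lib/ethdev/rte_ethdev.h | grep '^+#define '

This lists both the new RTE_ETH_* definitions and the one-line
backward-compatibility aliases mapping each old name onto its new one.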
---
 app/proc-info/main.c                          |   8 +-
 app/test-eventdev/test_perf_common.c          |   4 +-
 app/test-eventdev/test_pipeline_common.c      |  12 +-
 app/test-flow-perf/config.h                   |   2 +-
 app/test-pipeline/init.c                      |   8 +-
 app/test-pmd/cmdline.c                        | 298 +++---
 app/test-pmd/config.c                         | 202 ++--
 app/test-pmd/csumonly.c                       |  28 +-
 app/test-pmd/flowgen.c                        |   6 +-
 app/test-pmd/macfwd.c                         |   6 +-
 app/test-pmd/macswap_common.h                 |   6 +-
 app/test-pmd/parameters.c                     |  54 +-
 app/test-pmd/testpmd.c                        |  60 +-
 app/test-pmd/testpmd.h                        |   2 +-
 app/test-pmd/txonly.c                         |   6 +-
 app/test/test_ethdev_link.c                   |  68 +-
 app/test/test_event_eth_rx_adapter.c          |   4 +-
 app/test/test_kni.c                           |   2 +-
 app/test/test_link_bonding.c                  |   4 +-
 app/test/test_link_bonding_mode4.c            |   4 +-
 app/test/test_link_bonding_rssconf.c          |  28 +-
 app/test/test_pmd_perf.c                      |  12 +-
 app/test/virtual_pmd.c                        |  10 +-
 doc/guides/eventdevs/cnxk.rst                 |   2 +-
 doc/guides/eventdevs/octeontx2.rst            |   2 +-
 doc/guides/howto/debug_troubleshoot.rst       |   2 +-
 doc/guides/nics/bnxt.rst                      |  26 +-
 doc/guides/nics/enic.rst                      |   2 +-
 doc/guides/nics/features.rst                  | 116 +-
 doc/guides/nics/fm10k.rst                     |   6 +-
 doc/guides/nics/intel_vf.rst                  |  10 +-
 doc/guides/nics/ixgbe.rst                     |  12 +-
 doc/guides/nics/mlx5.rst                      |   4 +-
 doc/guides/nics/tap.rst                       |   2 +-
 .../generic_segmentation_offload_lib.rst      |   8 +-
 doc/guides/prog_guide/mbuf_lib.rst            |  18 +-
 doc/guides/prog_guide/poll_mode_drv.rst       |   8 +-
 doc/guides/prog_guide/rte_flow.rst            |  34 +-
 doc/guides/prog_guide/rte_security.rst        |   2 +-
 doc/guides/rel_notes/deprecation.rst          |  12 +-
 doc/guides/rel_notes/release_21_11.rst        |   3 +
 doc/guides/sample_app_ug/ipsec_secgw.rst      |   4 +-
 doc/guides/testpmd_app_ug/run_app.rst         |   2 +-
 drivers/bus/dpaa/include/process.h            |  16 +-
 drivers/common/cnxk/roc_npc.h                 |   2 +-
 drivers/net/af_packet/rte_eth_af_packet.c     |  16 +-
 drivers/net/af_xdp/rte_eth_af_xdp.c           |  12 +-
 drivers/net/ark/ark_ethdev.c                  |  16 +-
 drivers/net/atlantic/atl_ethdev.c             |  90 +-
 drivers/net/atlantic/atl_ethdev.h             |  18 +-
 drivers/net/atlantic/atl_rxtx.c               |   6 +-
 drivers/net/avp/avp_ethdev.c                  |  26 +-
 drivers/net/axgbe/axgbe_dev.c                 |   6 +-
 drivers/net/axgbe/axgbe_ethdev.c              | 110 +-
 drivers/net/axgbe/axgbe_ethdev.h              |  12 +-
 drivers/net/axgbe/axgbe_mdio.c                |   2 +-
 drivers/net/axgbe/axgbe_rxtx.c                |   6 +-
 drivers/net/bnx2x/bnx2x_ethdev.c              |  16 +-
 drivers/net/bnxt/bnxt.h                       |  68 +-
 drivers/net/bnxt/bnxt_ethdev.c                | 178 ++--
 drivers/net/bnxt/bnxt_flow.c                  |   4 +-
 drivers/net/bnxt/bnxt_hwrm.c                  | 112 +-
 drivers/net/bnxt/bnxt_reps.c                  |   2 +-
 drivers/net/bnxt/bnxt_ring.c                  |   4 +-
 drivers/net/bnxt/bnxt_rxq.c                   |  28 +-
 drivers/net/bnxt/bnxt_rxr.c                   |   4 +-
 drivers/net/bnxt/bnxt_rxtx_vec_avx2.c         |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_common.h       |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c         |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c          |   2 +-
 drivers/net/bnxt/bnxt_txr.c                   |   4 +-
 drivers/net/bnxt/bnxt_vnic.c                  |  30 +-
 drivers/net/bnxt/rte_pmd_bnxt.c               |   8 +-
 drivers/net/bonding/eth_bond_private.h        |   4 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c     |  16 +-
 drivers/net/bonding/rte_eth_bond_api.c        |   6 +-
 drivers/net/bonding/rte_eth_bond_pmd.c        |  56 +-
 drivers/net/cnxk/cn10k_ethdev.c               |  38 +-
 drivers/net/cnxk/cn10k_rx.c                   |   4 +-
 drivers/net/cnxk/cn10k_tx.c                   |   4 +-
 drivers/net/cnxk/cn9k_ethdev.c                |  56 +-
 drivers/net/cnxk/cn9k_rx.c                    |   4 +-
 drivers/net/cnxk/cn9k_tx.c                    |   4 +-
 drivers/net/cnxk/cnxk_ethdev.c                |  84 +-
 drivers/net/cnxk/cnxk_ethdev.h                |  49 +-
 drivers/net/cnxk/cnxk_ethdev_devargs.c        |   6 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c            | 112 +-
 drivers/net/cnxk/cnxk_link.c                  |  14 +-
 drivers/net/cnxk/cnxk_ptp.c                   |   4 +-
 drivers/net/cnxk/cnxk_rte_flow.c              |   2 +-
 drivers/net/cxgbe/cxgbe.h                     |  48 +-
 drivers/net/cxgbe/cxgbe_ethdev.c              |  50 +-
 drivers/net/cxgbe/cxgbe_main.c                |  12 +-
 drivers/net/cxgbe/sge.c                       |   2 +-
 drivers/net/dpaa/dpaa_ethdev.c                | 190 ++--
 drivers/net/dpaa/dpaa_ethdev.h                |  10 +-
 drivers/net/dpaa/dpaa_flow.c                  |  32 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |  34 +-
 drivers/net/dpaa2/dpaa2_ethdev.c              | 148 +--
 drivers/net/dpaa2/dpaa2_ethdev.h              |  12 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   8 +-
 drivers/net/e1000/e1000_ethdev.h              |  18 +-
 drivers/net/e1000/em_ethdev.c                 |  68 +-
 drivers/net/e1000/em_rxtx.c                   |  48 +-
 drivers/net/e1000/igb_ethdev.c                | 166 +--
 drivers/net/e1000/igb_pf.c                    |   2 +-
 drivers/net/e1000/igb_rxtx.c                  | 120 +--
 drivers/net/ena/ena_ethdev.c                  |  70 +-
 drivers/net/ena/ena_ethdev.h                  |   4 +-
 drivers/net/ena/ena_rss.c                     |  76 +-
 drivers/net/enetc/enetc_ethdev.c              |  38 +-
 drivers/net/enic/enic.h                       |   2 +-
 drivers/net/enic/enic_ethdev.c                |  88 +-
 drivers/net/enic/enic_main.c                  |  40 +-
 drivers/net/enic/enic_res.c                   |  52 +-
 drivers/net/failsafe/failsafe.c               |   8 +-
 drivers/net/failsafe/failsafe_intr.c          |   4 +-
 drivers/net/failsafe/failsafe_ops.c           |  82 +-
 drivers/net/fm10k/fm10k.h                     |   4 +-
 drivers/net/fm10k/fm10k_ethdev.c              | 148 +--
 drivers/net/fm10k/fm10k_rxtx_vec.c            |   6 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c      |  22 +-
 drivers/net/hinic/hinic_pmd_ethdev.c          | 142 +--
 drivers/net/hinic/hinic_pmd_rx.c              |  36 +-
 drivers/net/hinic/hinic_pmd_rx.h              |  22 +-
 drivers/net/hns3/hns3_dcb.c                   |  14 +-
 drivers/net/hns3/hns3_ethdev.c                | 360 +++----
 drivers/net/hns3/hns3_ethdev.h                |  12 +-
 drivers/net/hns3/hns3_ethdev_vf.c             | 108 +-
 drivers/net/hns3/hns3_flow.c                  |   6 +-
 drivers/net/hns3/hns3_ptp.c                   |   2 +-
 drivers/net/hns3/hns3_rss.c                   | 108 +-
 drivers/net/hns3/hns3_rss.h                   |  28 +-
 drivers/net/hns3/hns3_rxtx.c                  |  30 +-
 drivers/net/hns3/hns3_rxtx.h                  |   2 +-
 drivers/net/hns3/hns3_rxtx_vec.c              |  10 +-
 drivers/net/i40e/i40e_ethdev.c                | 278 ++---
 drivers/net/i40e/i40e_ethdev.h                |  24 +-
 drivers/net/i40e/i40e_ethdev_vf.c             | 118 +--
 drivers/net/i40e/i40e_flow.c                  |   2 +-
 drivers/net/i40e/i40e_hash.c                  | 156 +--
 drivers/net/i40e/i40e_pf.c                    |  14 +-
 drivers/net/i40e/i40e_rxtx.c                  |  10 +-
 drivers/net/i40e/i40e_rxtx.h                  |   4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c       |   2 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h       |   8 +-
 drivers/net/i40e/i40e_vf_representor.c        |  48 +-
 drivers/net/iavf/iavf.h                       |  24 +-
 drivers/net/iavf/iavf_ethdev.c                | 186 ++--
 drivers/net/iavf/iavf_hash.c                  | 300 +++---
 drivers/net/iavf/iavf_rxtx.c                  |   2 +-
 drivers/net/iavf/iavf_rxtx.h                  |  24 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c         |   4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c       |   6 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   2 +-
 drivers/net/ice/ice_dcf.c                     |   2 +-
 drivers/net/ice/ice_dcf_ethdev.c              |  90 +-
 drivers/net/ice/ice_dcf_vf_representor.c      |  58 +-
 drivers/net/ice/ice_ethdev.c                  | 190 ++--
 drivers/net/ice/ice_ethdev.h                  |  26 +-
 drivers/net/ice/ice_hash.c                    | 268 ++---
 drivers/net/ice/ice_rxtx.c                    |   8 +-
 drivers/net/ice/ice_rxtx_vec_avx2.c           |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c         |   4 +-
 drivers/net/ice/ice_rxtx_vec_common.h         |  26 +-
 drivers/net/ice/ice_rxtx_vec_sse.c            |   2 +-
 drivers/net/igc/igc_ethdev.c                  | 146 +--
 drivers/net/igc/igc_ethdev.h                  |  56 +-
 drivers/net/igc/igc_txrx.c                    |  50 +-
 drivers/net/ionic/ionic_ethdev.c              | 140 +--
 drivers/net/ionic/ionic_ethdev.h              |  12 +-
 drivers/net/ionic/ionic_lif.c                 |  36 +-
 drivers/net/ionic/ionic_rxtx.c                |  10 +-
 drivers/net/ipn3ke/ipn3ke_representor.c       |  70 +-
 drivers/net/ixgbe/ixgbe_ethdev.c              | 313 +++---
 drivers/net/ixgbe/ixgbe_ethdev.h              |  18 +-
 drivers/net/ixgbe/ixgbe_fdir.c                |  24 +-
 drivers/net/ixgbe/ixgbe_flow.c                |   2 +-
 drivers/net/ixgbe/ixgbe_ipsec.c               |  12 +-
 drivers/net/ixgbe/ixgbe_pf.c                  |  38 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                | 253 +++--
 drivers/net/ixgbe/ixgbe_rxtx.h                |   4 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |   2 +-
 drivers/net/ixgbe/ixgbe_tm.c                  |  16 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c      |  16 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.c             |  14 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.h             |   4 +-
 drivers/net/kni/rte_eth_kni.c                 |   8 +-
 drivers/net/liquidio/lio_ethdev.c             | 118 +--
 drivers/net/memif/memif_socket.c              |   2 +-
 drivers/net/memif/rte_eth_memif.c             |  14 +-
 drivers/net/mlx4/mlx4_ethdev.c                |  32 +-
 drivers/net/mlx4/mlx4_flow.c                  |  30 +-
 drivers/net/mlx4/mlx4_intr.c                  |   8 +-
 drivers/net/mlx4/mlx4_rxq.c                   |  20 +-
 drivers/net/mlx4/mlx4_txq.c                   |  24 +-
 drivers/net/mlx5/linux/mlx5_ethdev_os.c       |  54 +-
 drivers/net/mlx5/linux/mlx5_os.c              |   6 +-
 drivers/net/mlx5/mlx5.c                       |   4 +-
 drivers/net/mlx5/mlx5.h                       |   2 +-
 drivers/net/mlx5/mlx5_defs.h                  |   6 +-
 drivers/net/mlx5/mlx5_ethdev.c                |   6 +-
 drivers/net/mlx5/mlx5_flow.c                  |  54 +-
 drivers/net/mlx5/mlx5_flow.h                  |  12 +-
 drivers/net/mlx5/mlx5_flow_dv.c               |  44 +-
 drivers/net/mlx5/mlx5_flow_verbs.c            |   4 +-
 drivers/net/mlx5/mlx5_rss.c                   |  10 +-
 drivers/net/mlx5/mlx5_rxq.c                   |  42 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h              |   8 +-
 drivers/net/mlx5/mlx5_tx.c                    |  30 +-
 drivers/net/mlx5/mlx5_txq.c                   |  52 +-
 drivers/net/mlx5/mlx5_vlan.c                  |   4 +-
 drivers/net/mlx5/windows/mlx5_os.c            |   4 +-
 drivers/net/mvneta/mvneta_ethdev.c            |  34 +-
 drivers/net/mvneta/mvneta_ethdev.h            |  12 +-
 drivers/net/mvneta/mvneta_rxtx.c              |   2 +-
 drivers/net/mvpp2/mrvl_ethdev.c               | 116 +-
 drivers/net/netvsc/hn_ethdev.c                |  70 +-
 drivers/net/netvsc/hn_rndis.c                 |  50 +-
 drivers/net/nfb/nfb_ethdev.c                  |  20 +-
 drivers/net/nfb/nfb_rx.c                      |   2 +-
 drivers/net/nfp/nfp_common.c                  | 130 +--
 drivers/net/nfp/nfp_ethdev.c                  |   2 +-
 drivers/net/nfp/nfp_ethdev_vf.c               |   2 +-
 drivers/net/ngbe/ngbe_ethdev.c                |  50 +-
 drivers/net/null/rte_eth_null.c               |  28 +-
 drivers/net/octeontx/octeontx_ethdev.c        |  78 +-
 drivers/net/octeontx/octeontx_ethdev.h        |  32 +-
 drivers/net/octeontx/octeontx_ethdev_ops.c    |  26 +-
 drivers/net/octeontx2/otx2_ethdev.c           |  96 +-
 drivers/net/octeontx2/otx2_ethdev.h           |  66 +-
 drivers/net/octeontx2/otx2_ethdev_devargs.c   |  12 +-
 drivers/net/octeontx2/otx2_ethdev_ops.c       |  18 +-
 drivers/net/octeontx2/otx2_ethdev_sec.c       |   8 +-
 drivers/net/octeontx2/otx2_flow.c             |   2 +-
 drivers/net/octeontx2/otx2_flow_ctrl.c        |  36 +-
 drivers/net/octeontx2/otx2_flow_parse.c       |   4 +-
 drivers/net/octeontx2/otx2_link.c             |  40 +-
 drivers/net/octeontx2/otx2_mcast.c            |   2 +-
 drivers/net/octeontx2/otx2_ptp.c              |   4 +-
 drivers/net/octeontx2/otx2_rss.c              |  70 +-
 drivers/net/octeontx2/otx2_rx.c               |   4 +-
 drivers/net/octeontx2/otx2_tx.c               |   2 +-
 drivers/net/octeontx2/otx2_vlan.c             |  42 +-
 drivers/net/octeontx_ep/otx_ep_ethdev.c       |   8 +-
 drivers/net/octeontx_ep/otx_ep_rxtx.c         |   8 +-
 drivers/net/pcap/pcap_ethdev.c                |  12 +-
 drivers/net/pfe/pfe_ethdev.c                  |  18 +-
 drivers/net/qede/base/mcp_public.h            |   4 +-
 drivers/net/qede/qede_ethdev.c                | 152 +--
 drivers/net/qede/qede_filter.c                |  10 +-
 drivers/net/qede/qede_rxtx.c                  |   2 +-
 drivers/net/qede/qede_rxtx.h                  |  16 +-
 drivers/net/ring/rte_eth_ring.c               |  20 +-
 drivers/net/sfc/sfc.c                         |  30 +-
 drivers/net/sfc/sfc_ef100_rx.c                |  10 +-
 drivers/net/sfc/sfc_ef100_tx.c                |  20 +-
 drivers/net/sfc/sfc_ef10_essb_rx.c            |   4 +-
 drivers/net/sfc/sfc_ef10_rx.c                 |   8 +-
 drivers/net/sfc/sfc_ef10_tx.c                 |  32 +-
 drivers/net/sfc/sfc_ethdev.c                  |  52 +-
 drivers/net/sfc/sfc_flow.c                    |   2 +-
 drivers/net/sfc/sfc_port.c                    |  54 +-
 drivers/net/sfc/sfc_rx.c                      |  52 +-
 drivers/net/sfc/sfc_tx.c                      |  50 +-
 drivers/net/softnic/rte_eth_softnic.c         |  12 +-
 drivers/net/szedata2/rte_eth_szedata2.c       |  14 +-
 drivers/net/tap/rte_eth_tap.c                 | 104 +-
 drivers/net/tap/tap_rss.h                     |   2 +-
 drivers/net/thunderx/nicvf_ethdev.c           | 108 +-
 drivers/net/thunderx/nicvf_ethdev.h           |  42 +-
 drivers/net/txgbe/txgbe_ethdev.c              | 244 ++---
 drivers/net/txgbe/txgbe_ethdev.h              |  18 +-
 drivers/net/txgbe/txgbe_ethdev_vf.c           |  24 +-
 drivers/net/txgbe/txgbe_fdir.c                |  20 +-
 drivers/net/txgbe/txgbe_flow.c                |   2 +-
 drivers/net/txgbe/txgbe_ipsec.c               |  12 +-
 drivers/net/txgbe/txgbe_pf.c                  |  34 +-
 drivers/net/txgbe/txgbe_rxtx.c                | 312 +++---
 drivers/net/txgbe/txgbe_rxtx.h                |   4 +-
 drivers/net/txgbe/txgbe_tm.c                  |  16 +-
 drivers/net/vhost/rte_eth_vhost.c             |  16 +-
 drivers/net/virtio/virtio_ethdev.c            | 126 +--
 drivers/net/vmxnet3/vmxnet3_ethdev.c          |  74 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h          |  16 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c            |  16 +-
 examples/bbdev_app/main.c                     |   6 +-
 examples/bond/main.c                          |  14 +-
 examples/distributor/main.c                   |  12 +-
 examples/ethtool/ethtool-app/main.c           |   2 +-
 examples/ethtool/lib/rte_ethtool.c            |  18 +-
 .../pipeline_worker_generic.c                 |  16 +-
 .../eventdev_pipeline/pipeline_worker_tx.c    |  12 +-
 examples/flow_classify/flow_classify.c        |   4 +-
 examples/flow_filtering/main.c                |  16 +-
 examples/ioat/ioatfwd.c                       |   8 +-
 examples/ip_fragmentation/main.c              |  14 +-
 examples/ip_pipeline/link.c                   |  20 +-
 examples/ip_reassembly/main.c                 |  20 +-
 examples/ipsec-secgw/ipsec-secgw.c            |  34 +-
 examples/ipsec-secgw/sa.c                     |   8 +-
 examples/ipv4_multicast/main.c                |   8 +-
 examples/kni/main.c                           |  12 +-
 examples/l2fwd-crypto/main.c                  |  10 +-
 examples/l2fwd-event/l2fwd_common.c           |  10 +-
 examples/l2fwd-event/main.c                   |   2 +-
 examples/l2fwd-jobstats/main.c                |   8 +-
 examples/l2fwd-keepalive/main.c               |   8 +-
 examples/l2fwd/main.c                         |   8 +-
 examples/l3fwd-acl/main.c                     |  20 +-
 examples/l3fwd-graph/main.c                   |  16 +-
 examples/l3fwd-power/main.c                   |  18 +-
 examples/l3fwd/l3fwd_event.c                  |   4 +-
 examples/l3fwd/main.c                         |  20 +-
 examples/link_status_interrupt/main.c         |  10 +-
 .../client_server_mp/mp_server/init.c         |   4 +-
 examples/multi_process/symmetric_mp/main.c    |  14 +-
 examples/ntb/ntb_fwd.c                        |   6 +-
 examples/packet_ordering/main.c               |   4 +-
 .../performance-thread/l3fwd-thread/main.c    |  18 +-
 examples/pipeline/obj.c                       |  20 +-
 examples/ptpclient/ptpclient.c                |  10 +-
 examples/qos_meter/main.c                     |  16 +-
 examples/qos_sched/init.c                     |   6 +-
 examples/rxtx_callbacks/main.c                |   8 +-
 examples/server_node_efd/server/init.c        |   8 +-
 examples/skeleton/basicfwd.c                  |   4 +-
 examples/vhost/main.c                         |  28 +-
 examples/vm_power_manager/main.c              |   6 +-
 examples/vmdq/main.c                          |  20 +-
 examples/vmdq_dcb/main.c                      |  40 +-
 lib/ethdev/rte_ethdev.c                       | 193 ++--
 lib/ethdev/rte_ethdev.h                       | 997 +++++++++++-------
 lib/ethdev/rte_ethdev_core.h                  |   2 +-
 lib/ethdev/rte_flow.h                         |   2 +-
 lib/gso/rte_gso.c                             |  20 +-
 lib/gso/rte_gso.h                             |   4 +-
 lib/mbuf/rte_mbuf_core.h                      |   8 +-
 lib/mbuf/rte_mbuf_dyn.h                       |   2 +-
 339 files changed, 6728 insertions(+), 6500 deletions(-)

diff --git a/app/proc-info/main.c b/app/proc-info/main.c
index a8e928fa9ff3..963b6aa5c589 100644
--- a/app/proc-info/main.c
+++ b/app/proc-info/main.c
@@ -757,11 +757,11 @@ show_port(void)
 		}
 
 		ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
-		if (ret == 0 && fc_conf.mode != RTE_FC_NONE)  {
+		if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE)  {
 			printf("\t  -- flow control mode %s%s high %u low %u pause %u%s%s\n",
-			       fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
-			       fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
-			       fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+			       fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+			       fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+			       fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
 			       fc_conf.autoneg ? " auto" : "",
 			       fc_conf.high_water,
 			       fc_conf.low_water,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index cc100650c21e..41e92143121b 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -668,14 +668,14 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct test_perf *t = evt_test_priv(test);
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 			.split_hdr_size = 0,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 6ee530d4cdc9..96c8a5828364 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
@@ -199,7 +199,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 	port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
 	if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	t->internal_port = 1;
 	RTE_ETH_FOREACH_DEV(i) {
@@ -224,7 +224,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 			local_port_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_RSS_HASH;
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 		ret = rte_eth_dev_info_get(i, &dev_info);
 		if (ret != 0) {
@@ -234,9 +234,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 
 		/* Enable mbuf fast free if PMD has the capability. */
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h
index a14d4e05e185..4249b6175b82 100644
--- a/app/test-flow-perf/config.h
+++ b/app/test-flow-perf/config.h
@@ -5,7 +5,7 @@
 #define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
 
 /* Configuration */
 #define RXQ_NUM 4
diff --git a/app/test-pipeline/init.c b/app/test-pipeline/init.c
index fe37d63730c6..c73801904103 100644
--- a/app/test-pipeline/init.c
+++ b/app/test-pipeline/init.c
@@ -70,16 +70,16 @@ struct app_params app = {
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -178,7 +178,7 @@ app_ports_check_link(void)
 		RTE_LOG(INFO, USER1, "Port %u %s\n",
 			port,
 			link_status_text);
-		if (link.link_status == ETH_LINK_DOWN)
+		if (link.link_status == RTE_ETH_LINK_DOWN)
 			all_ports_up = 0;
 	}
 
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 82253bc75110..9ff9847dffa0 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1490,51 +1490,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
 	int duplex;
 
 	if (!strcmp(duplexstr, "half")) {
-		duplex = ETH_LINK_HALF_DUPLEX;
+		duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	} else if (!strcmp(duplexstr, "full")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else if (!strcmp(duplexstr, "auto")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		fprintf(stderr, "Unknown duplex parameter\n");
 		return -1;
 	}
 
 	if (!strcmp(speedstr, "10")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
 	} else if (!strcmp(speedstr, "100")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
 	} else {
-		if (duplex != ETH_LINK_FULL_DUPLEX) {
+		if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
 			fprintf(stderr, "Invalid speed/duplex parameters\n");
 			return -1;
 		}
 		if (!strcmp(speedstr, "1000")) {
-			*speed = ETH_LINK_SPEED_1G;
+			*speed = RTE_ETH_LINK_SPEED_1G;
 		} else if (!strcmp(speedstr, "10000")) {
-			*speed = ETH_LINK_SPEED_10G;
+			*speed = RTE_ETH_LINK_SPEED_10G;
 		} else if (!strcmp(speedstr, "25000")) {
-			*speed = ETH_LINK_SPEED_25G;
+			*speed = RTE_ETH_LINK_SPEED_25G;
 		} else if (!strcmp(speedstr, "40000")) {
-			*speed = ETH_LINK_SPEED_40G;
+			*speed = RTE_ETH_LINK_SPEED_40G;
 		} else if (!strcmp(speedstr, "50000")) {
-			*speed = ETH_LINK_SPEED_50G;
+			*speed = RTE_ETH_LINK_SPEED_50G;
 		} else if (!strcmp(speedstr, "100000")) {
-			*speed = ETH_LINK_SPEED_100G;
+			*speed = RTE_ETH_LINK_SPEED_100G;
 		} else if (!strcmp(speedstr, "200000")) {
-			*speed = ETH_LINK_SPEED_200G;
+			*speed = RTE_ETH_LINK_SPEED_200G;
 		} else if (!strcmp(speedstr, "auto")) {
-			*speed = ETH_LINK_SPEED_AUTONEG;
+			*speed = RTE_ETH_LINK_SPEED_AUTONEG;
 		} else {
 			fprintf(stderr, "Unknown speed parameter\n");
 			return -1;
 		}
 	}
 
-	if (*speed != ETH_LINK_SPEED_AUTONEG)
-		*speed |= ETH_LINK_SPEED_FIXED;
+	if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+		*speed |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return 0;
 }
@@ -2185,33 +2185,33 @@ cmd_config_rss_parsed(void *parsed_result,
 	int ret;
 
 	if (!strcmp(res->value, "all"))
-		rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
-			ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
-			ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
-			ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
-			ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+			RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+			RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+			RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+			RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "eth"))
-		rss_conf.rss_hf = ETH_RSS_ETH;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH;
 	else if (!strcmp(res->value, "vlan"))
-		rss_conf.rss_hf = ETH_RSS_VLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
 	else if (!strcmp(res->value, "ip"))
-		rss_conf.rss_hf = ETH_RSS_IP;
+		rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	else if (!strcmp(res->value, "udp"))
-		rss_conf.rss_hf = ETH_RSS_UDP;
+		rss_conf.rss_hf = RTE_ETH_RSS_UDP;
 	else if (!strcmp(res->value, "tcp"))
-		rss_conf.rss_hf = ETH_RSS_TCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_TCP;
 	else if (!strcmp(res->value, "sctp"))
-		rss_conf.rss_hf = ETH_RSS_SCTP;
+		rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
 	else if (!strcmp(res->value, "ether"))
-		rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
 	else if (!strcmp(res->value, "port"))
-		rss_conf.rss_hf = ETH_RSS_PORT;
+		rss_conf.rss_hf = RTE_ETH_RSS_PORT;
 	else if (!strcmp(res->value, "vxlan"))
-		rss_conf.rss_hf = ETH_RSS_VXLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
 	else if (!strcmp(res->value, "geneve"))
-		rss_conf.rss_hf = ETH_RSS_GENEVE;
+		rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
 	else if (!strcmp(res->value, "nvgre"))
-		rss_conf.rss_hf = ETH_RSS_NVGRE;
+		rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
 	else if (!strcmp(res->value, "l3-pre32"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
 	else if (!strcmp(res->value, "l3-pre40"))
@@ -2225,44 +2225,44 @@ cmd_config_rss_parsed(void *parsed_result,
 	else if (!strcmp(res->value, "l3-pre96"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
 	else if (!strcmp(res->value, "l3-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
 	else if (!strcmp(res->value, "l3-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
 	else if (!strcmp(res->value, "l4-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
 	else if (!strcmp(res->value, "l2-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
 	else if (!strcmp(res->value, "l2-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
 	else if (!strcmp(res->value, "l2tpv3"))
-		rss_conf.rss_hf = ETH_RSS_L2TPV3;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
 	else if (!strcmp(res->value, "esp"))
-		rss_conf.rss_hf = ETH_RSS_ESP;
+		rss_conf.rss_hf = RTE_ETH_RSS_ESP;
 	else if (!strcmp(res->value, "ah"))
-		rss_conf.rss_hf = ETH_RSS_AH;
+		rss_conf.rss_hf = RTE_ETH_RSS_AH;
 	else if (!strcmp(res->value, "pfcp"))
-		rss_conf.rss_hf = ETH_RSS_PFCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
 	else if (!strcmp(res->value, "pppoe"))
-		rss_conf.rss_hf = ETH_RSS_PPPOE;
+		rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
 	else if (!strcmp(res->value, "gtpu"))
-		rss_conf.rss_hf = ETH_RSS_GTPU;
+		rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
 	else if (!strcmp(res->value, "ecpri"))
-		rss_conf.rss_hf = ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "mpls"))
-		rss_conf.rss_hf = ETH_RSS_MPLS;
+		rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "level-default")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
 	} else if (!strcmp(res->value, "level-outer")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
 	} else if (!strcmp(res->value, "level-inner")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
 	} else if (!strcmp(res->value, "default"))
 		use_default = 1;
 	else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@@ -2999,8 +2999,8 @@ parse_reta_config(const char *str,
 			return -1;
 		}
 
-		idx = hash_index / RTE_RETA_GROUP_SIZE;
-		shift = hash_index % RTE_RETA_GROUP_SIZE;
+		idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
+		shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[idx].mask |= (1ULL << shift);
 		reta_conf[idx].reta[shift] = nb_queue;
 	}
@@ -3029,10 +3029,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
 	} else
 		printf("The reta size of port %d is %u\n",
 			res->port_id, dev_info.reta_size);
-	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+	if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		fprintf(stderr,
 			"Currently do not support more than %u entries of redirection table\n",
-			ETH_RSS_RETA_SIZE_512);
+			RTE_ETH_RSS_RETA_SIZE_512);
 		return;
 	}
 
@@ -3103,8 +3103,8 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
 	char *end;
 	char *str_fld[8];
 	uint16_t i;
-	uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 	int ret;
 
 	p = strchr(p0, '(');
@@ -3149,7 +3149,7 @@ cmd_showport_reta_parsed(void *parsed_result,
 	if (ret != 0)
 		return;
 
-	max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+	max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
 	if (res->size == 0 || res->size > max_reta_size) {
 		fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
 			res->size, max_reta_size);
@@ -3289,7 +3289,7 @@ cmd_config_dcb_parsed(void *parsed_result,
 		return;
 	}
 
-	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+	if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
 		fprintf(stderr,
 			"The invalid number of traffic class, only 4 or 8 allowed.\n");
 		return;
@@ -4293,9 +4293,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
 	enum rte_vlan_type vlan_type;
 
 	if (!strcmp(res->vlan_type, "inner"))
-		vlan_type = ETH_VLAN_TYPE_INNER;
+		vlan_type = RTE_ETH_VLAN_TYPE_INNER;
 	else if (!strcmp(res->vlan_type, "outer"))
-		vlan_type = ETH_VLAN_TYPE_OUTER;
+		vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
 	else {
 		fprintf(stderr, "Unknown vlan type\n");
 		return;
@@ -4632,55 +4632,55 @@ csum_show(int port_id)
 	printf("Parse tunnel is %s\n",
 		(ports[port_id].parse_tunnel) ? "on" : "off");
 	printf("IP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
 	printf("UDP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
 	printf("TCP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
 	printf("SCTP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
 	printf("Outer-Ip checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
 	printf("Outer-Udp checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
 
 	/* display warnings if configuration is not supported by the NIC */
 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
 	if (ret != 0)
 		return;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware UDP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware TCP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 			== 0) {
 		fprintf(stderr,
 			"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@@ -4730,8 +4730,8 @@ cmd_csum_parsed(void *parsed_result,
 
 		if (!strcmp(res->proto, "ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_IPV4_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"IP checksum offload is not supported by port %u\n",
@@ -4739,8 +4739,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_UDP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"UDP checksum offload is not supported by port %u\n",
@@ -4748,8 +4748,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "tcp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_TCP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"TCP checksum offload is not supported by port %u\n",
@@ -4757,8 +4757,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "sctp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_SCTP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"SCTP checksum offload is not supported by port %u\n",
@@ -4766,9 +4766,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer IP checksum offload is not supported by port %u\n",
@@ -4776,9 +4776,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer UDP checksum offload is not supported by port %u\n",
@@ -4933,7 +4933,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr, "Error: TSO is not supported by port %d\n",
 			res->port_id);
 		return;
@@ -4941,11 +4941,11 @@ cmd_tso_set_parsed(void *parsed_result,
 
 	if (ports[res->port_id].tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_TCP_TSO;
+						~RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO for non-tunneled packets is disabled\n");
 	} else {
 		ports[res->port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_TCP_TSO;
+						RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO segment size for non-tunneled packets is %d\n",
 			ports[res->port_id].tso_segsz);
 	}
@@ -4957,7 +4957,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr,
 			"Warning: TSO enabled but not supported by port %d\n",
 			res->port_id);
@@ -5028,27 +5028,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
 	if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
 		return dev_info;
 
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
 		fprintf(stderr,
 			"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
@@ -5076,20 +5076,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 	dev_info = check_tunnel_tso_nic_support(res->port_id);
 	if (ports[res->port_id].tunnel_tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-			~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IP_TNL_TSO |
-			  DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 		printf("TSO for tunneled packets is disabled\n");
 	} else {
-		uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-					 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IP_TNL_TSO |
-					 DEV_TX_OFFLOAD_UDP_TNL_TSO);
+		uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 
 		ports[res->port_id].dev_conf.txmode.offloads |=
 			(tso_offloads & dev_info.tx_offload_capa);
@@ -5112,7 +5112,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 			fprintf(stderr,
 				"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
 		if (!(ports[res->port_id].dev_conf.txmode.offloads &
-		      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+		      RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 			fprintf(stderr,
 				"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
 	}
@@ -7058,9 +7058,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
 		return;
 	}
 
-	if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		rx_fc_en = true;
-	if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		tx_fc_en = true;
 
 	printf("\n%s Flow control infos for port %-2d %s\n",
@@ -7338,12 +7338,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
-			{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	/* Partial command line, retrieve current configuration */
@@ -7356,11 +7356,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 			return;
 		}
 
-		if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			rx_fc_en = 1;
-		if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			tx_fc_en = 1;
 	}
 
@@ -7428,12 +7428,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
-		{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+		{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@@ -8950,13 +8950,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
 	int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
 	if (!strcmp(res->what,"rxmode")) {
 		if (!strcmp(res->mode, "AUPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
 		else if (!strcmp(res->mode, "ROPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
 		else if (!strcmp(res->mode, "BAM"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
 		else if (!strncmp(res->mode, "MPE",3))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 	}
 
 	RTE_SET_USED(is_on);
@@ -9356,7 +9356,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
 	int ret;
 
 	tunnel_udp.udp_port = res->udp_port;
-	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+	tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 
 	if (!strcmp(res->what, "add"))
 		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9422,13 +9422,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
 	tunnel_udp.udp_port = res->udp_port;
 
 	if (!strcmp(res->tunnel_type, "vxlan")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 	} else if (!strcmp(res->tunnel_type, "geneve")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
 	} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
 	} else if (!strcmp(res->tunnel_type, "ecpri")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
 	} else {
 		fprintf(stderr, "Invalid tunnel type\n");
 		return;
@@ -9543,20 +9543,20 @@ cmd_set_mirror_mask_parsed(void *parsed_result,
 
 	memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf));
 
-	unsigned int vlan_list[ETH_MIRROR_MAX_VLANS];
+	unsigned int vlan_list[RTE_ETH_MIRROR_MAX_VLANS];
 
 	mr_conf.dst_pool = res->dstpool_id;
 
 	if (!strcmp(res->what, "pool-mirror-up")) {
 		mr_conf.pool_mask = strtoull(res->value, NULL, 16);
-		mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
+		mr_conf.rule_type = RTE_ETH_MIRROR_VIRTUAL_POOL_UP;
 	} else if (!strcmp(res->what, "pool-mirror-down")) {
 		mr_conf.pool_mask = strtoull(res->value, NULL, 16);
-		mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_DOWN;
+		mr_conf.rule_type = RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN;
 	} else if (!strcmp(res->what, "vlan-mirror")) {
-		mr_conf.rule_type = ETH_MIRROR_VLAN;
+		mr_conf.rule_type = RTE_ETH_MIRROR_VLAN;
 		nb_item = parse_item_list(res->value, "vlan",
-				ETH_MIRROR_MAX_VLANS, vlan_list, 1);
+				RTE_ETH_MIRROR_MAX_VLANS, vlan_list, 1);
 		if (nb_item <= 0)
 			return;
 
@@ -9656,9 +9656,9 @@ cmd_set_mirror_link_parsed(void *parsed_result,
 
 	memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf));
 	if (!strcmp(res->what, "uplink-mirror"))
-		mr_conf.rule_type = ETH_MIRROR_UPLINK_PORT;
+		mr_conf.rule_type = RTE_ETH_MIRROR_UPLINK_PORT;
 	else
-		mr_conf.rule_type = ETH_MIRROR_DOWNLINK_PORT;
+		mr_conf.rule_type = RTE_ETH_MIRROR_DOWNLINK_PORT;
 
 	mr_conf.dst_pool = res->dstpool_id;
 
@@ -11823,7 +11823,7 @@ cmd_set_macsec_offload_on_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
 #endif
@@ -11834,7 +11834,7 @@ cmd_set_macsec_offload_on_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MACSEC_INSERT;
+						RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
@@ -11920,7 +11920,7 @@ cmd_set_macsec_offload_off_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_disable(port_id);
 #endif
@@ -11928,7 +11928,7 @@ cmd_set_macsec_offload_off_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_MACSEC_INSERT;
+						~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
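
A note for readers following the rename in their own code: the MACsec hunks above keep the usual two-step pattern of probing tx_offload_capa and then flipping the bit in txmode.offloads. A minimal sketch of the probe under the new names, assuming `port_id` refers to a valid ethdev port:

#include <rte_ethdev.h>

/* Return >0 if MACsec insertion is offloadable on this port, 0 if not,
 * or a negative errno from rte_eth_dev_info_get(). */
static int
macsec_insert_supported(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* RTE_ETH_TX_OFFLOAD_MACSEC_INSERT replaces DEV_TX_OFFLOAD_MACSEC_INSERT. */
        return (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) != 0;
}
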
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 31d8ba1b913c..e9520e045aa0 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -86,60 +86,60 @@ static const struct {
 };
 
 const struct rss_type_info rss_type_table[] = {
-	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
-		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
-		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
-		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
 	{ "none", 0 },
-	{ "eth", ETH_RSS_ETH },
-	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
-	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
-	{ "vlan", ETH_RSS_VLAN },
-	{ "s-vlan", ETH_RSS_S_VLAN },
-	{ "c-vlan", ETH_RSS_C_VLAN },
-	{ "ipv4", ETH_RSS_IPV4 },
-	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
-	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
-	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
-	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
-	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
-	{ "ipv6", ETH_RSS_IPV6 },
-	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
-	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
-	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
-	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
-	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
-	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
-	{ "ipv6-ex", ETH_RSS_IPV6_EX },
-	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
-	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
-	{ "port", ETH_RSS_PORT },
-	{ "vxlan", ETH_RSS_VXLAN },
-	{ "geneve", ETH_RSS_GENEVE },
-	{ "nvgre", ETH_RSS_NVGRE },
-	{ "ip", ETH_RSS_IP },
-	{ "udp", ETH_RSS_UDP },
-	{ "tcp", ETH_RSS_TCP },
-	{ "sctp", ETH_RSS_SCTP },
-	{ "tunnel", ETH_RSS_TUNNEL },
+	{ "eth", RTE_ETH_RSS_ETH },
+	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+	{ "vlan", RTE_ETH_RSS_VLAN },
+	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
+	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
+	{ "ipv4", RTE_ETH_RSS_IPV4 },
+	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", RTE_ETH_RSS_IPV6 },
+	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+	{ "port", RTE_ETH_RSS_PORT },
+	{ "vxlan", RTE_ETH_RSS_VXLAN },
+	{ "geneve", RTE_ETH_RSS_GENEVE },
+	{ "nvgre", RTE_ETH_RSS_NVGRE },
+	{ "ip", RTE_ETH_RSS_IP },
+	{ "udp", RTE_ETH_RSS_UDP },
+	{ "tcp", RTE_ETH_RSS_TCP },
+	{ "sctp", RTE_ETH_RSS_SCTP },
+	{ "tunnel", RTE_ETH_RSS_TUNNEL },
 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
-	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
-	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
-	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
-	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
-	{ "esp", ETH_RSS_ESP },
-	{ "ah", ETH_RSS_AH },
-	{ "l2tpv3", ETH_RSS_L2TPV3 },
-	{ "pfcp", ETH_RSS_PFCP },
-	{ "pppoe", ETH_RSS_PPPOE },
-	{ "gtpu", ETH_RSS_GTPU },
-	{ "ecpri", ETH_RSS_ECPRI },
-	{ "mpls", ETH_RSS_MPLS },
+	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+	{ "esp", RTE_ETH_RSS_ESP },
+	{ "ah", RTE_ETH_RSS_AH },
+	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+	{ "pfcp", RTE_ETH_RSS_PFCP },
+	{ "pppoe", RTE_ETH_RSS_PPPOE },
+	{ "gtpu", RTE_ETH_RSS_GTPU },
+	{ "ecpri", RTE_ETH_RSS_ECPRI },
+	{ "mpls", RTE_ETH_RSS_MPLS },
 	{ NULL, 0 },
 };
 
@@ -474,39 +474,39 @@ static void
 device_infos_display_speeds(uint32_t speed_capa)
 {
 	printf("\n\tDevice speed capability:");
-	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
 		printf(" Autonegotiate (all speeds)");
-	if (speed_capa & ETH_LINK_SPEED_FIXED)
+	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
 		printf(" Disable autonegotiate (fixed speed)  ");
-	if (speed_capa & ETH_LINK_SPEED_10M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
 		printf(" 10 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_10M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
 		printf(" 10 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
 		printf(" 100 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
 		printf(" 100 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_1G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
 		printf(" 1 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_2_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
 		printf(" 2.5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
 		printf(" 5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_10G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
 		printf(" 10 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_20G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
 		printf(" 20 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_25G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
 		printf(" 25 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_40G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
 		printf(" 40 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_50G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
 		printf(" 50 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_56G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
 		printf(" 56 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_100G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
 		printf(" 100 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_200G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
 		printf(" 200 Gbps  ");
 }
 
@@ -636,9 +636,9 @@ port_infos_display(portid_t port_id)
 
 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
-	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 	       ("full-duplex") : ("half-duplex"));
-	printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 	       ("On") : ("Off"));
 
 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
@@ -656,22 +656,22 @@ port_infos_display(portid_t port_id)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 	if (vlan_offload >= 0){
 		printf("VLAN offload: \n");
-		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
 			printf("  strip on, ");
 		else
 			printf("  strip off, ");
 
-		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
 			printf("filter on, ");
 		else
 			printf("filter off, ");
 
-		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
 			printf("extend on, ");
 		else
 			printf("extend off, ");
 
-		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
 			printf("qinq strip on\n");
 		else
 			printf("qinq strip off\n");
@@ -1166,7 +1166,7 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
 	diag = rte_eth_dev_set_mtu(port_id, mtu);
 	if (diag)
 		fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
-	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	else if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		/*
 		 * Ether overhead in driver is equal to the difference of
 		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
@@ -1175,12 +1175,12 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
 		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
 		if (mtu > RTE_ETHER_MTU) {
 			rte_port->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			rte_port->dev_conf.rxmode.max_rx_pkt_len =
 						mtu + eth_overhead;
 		} else
 			rte_port->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	}
 }
 
@@ -2767,8 +2767,8 @@ port_rss_reta_info(portid_t port_id,
 	}
 
 	for (i = 0; i < nb_entries; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
@@ -3118,7 +3118,7 @@ dcb_fwd_config_setup(void)
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
 		fwd_lcores[lc_id]->stream_nb = 0;
 		fwd_lcores[lc_id]->stream_idx = sm_id;
-		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
 			/* if the nb_queue is zero, means this tc is
 			 * not enabled on the POOL
 			 */
@@ -4181,11 +4181,11 @@ vlan_extend_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	} else {
-		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4211,11 +4211,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4256,11 +4256,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	} else {
-		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4286,11 +4286,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	} else {
-		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4360,7 +4360,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 		return;
 
 	if (ports[port_id].dev_conf.txmode.offloads &
-	    DEV_TX_OFFLOAD_QINQ_INSERT) {
+	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
 		return;
 	}
@@ -4369,7 +4369,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: vlan insert is not supported by port %d\n",
 			port_id);
@@ -4377,7 +4377,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	ports[port_id].tx_vlan_id = vlan_id;
 }
 
@@ -4396,7 +4396,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: qinq insert not supported by port %d\n",
 			port_id);
@@ -4404,8 +4404,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
-						    DEV_TX_OFFLOAD_QINQ_INSERT);
+	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = vlan_id;
 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
 }
@@ -4414,8 +4414,8 @@ void
 tx_vlan_reset(portid_t port_id)
 {
 	ports[port_id].dev_conf.txmode.offloads &=
-				~(DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_QINQ_INSERT);
+				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = 0;
 	ports[port_id].tx_vlan_id_outer = 0;
 }
@@ -4821,7 +4821,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
 	ret = eth_link_get_nowait_print_err(port_id, &link);
 	if (ret < 0)
 		return 1;
-	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
 	    rate > link.link_speed) {
 		fprintf(stderr,
 			"Invalid rate value:%u bigger than link speed: %u\n",
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 38cc256533b6..454a2d41c366 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
-			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 				ol_flags |= PKT_TX_IP_CKSUM;
 			} else {
 				ipv4_hdr->hdr_checksum = 0;
@@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
-			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 				ol_flags |= PKT_TX_UDP_CKSUM;
 			} else {
 				udp_hdr->dgram_cksum = 0;
@@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		if (tso_segsz)
 			ol_flags |= PKT_TX_TCP_SEG;
-		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 			ol_flags |= PKT_TX_TCP_CKSUM;
 		} else {
 			tcp_hdr->cksum = 0;
@@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 			((char *)l3_hdr + info->l3_len);
 		/* sctp payload must be a multiple of 4 to be
 		 * offloaded */
-		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
 			((ipv4_hdr->total_length & 0x3) == 0)) {
 			ol_flags |= PKT_TX_SCTP_CKSUM;
 		} else {
@@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ipv4_hdr->hdr_checksum = 0;
 		ol_flags |= PKT_TX_OUTER_IPV4;
 
-		if (tx_offloads	& DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
 		else
 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ol_flags |= PKT_TX_TCP_SEG;
 
 	/* Skip SW outer UDP checksum generation if HW supports it */
-	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
 		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
 			udp_hdr->dgram_cksum
 				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				m->outer_l2_len = info.outer_l2_len;
 				m->outer_l3_len = info.outer_l3_len;
 				m->l2_len = info.l2_len;
@@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					rte_be_to_cpu_16(info.outer_ethertype),
 					info.outer_l3_len);
 			/* dump tx packet info */
-			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					    DEV_TX_OFFLOAD_UDP_CKSUM |
-					    DEV_TX_OFFLOAD_TCP_CKSUM |
-					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
 				info.tso_segsz != 0)
 				printf("tx: m->l2_len=%d m->l3_len=%d "
 					"m->l4_len=%d\n",
 					m->l2_len, m->l3_len, m->l4_len);
 			if (info.is_tunnel == 1) {
 				if ((tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 				    (tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
 				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
 					printf("tx: m->outer_l2_len=%d "
 						"m->outer_l3_len=%d\n",
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 9348618d0f8d..7d658d002cb6 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -100,11 +100,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
 
 	tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags |= PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads	& DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 0568ea794d48..1d878ba0a694 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 	fs->rx_packets += nb_rx;
 	txp = &ports[fs->tx_port];
 	tx_offloads = txp->dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
diff --git a/app/test-pmd/macswap_common.h b/app/test-pmd/macswap_common.h
index 7e9a3590a436..7ade9a686b7c 100644
--- a/app/test-pmd/macswap_common.h
+++ b/app/test-pmd/macswap_common.h
@@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
 {
 	uint64_t ol_flags = 0;
 
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
 			PKT_TX_VLAN : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
 			PKT_TX_QINQ : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
 			PKT_TX_MACSEC : 0;
 
 	return ol_flags;
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 7c13210f04aa..1d0187723532 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -475,29 +475,29 @@ parse_event_printing_config(const char *optarg, int enable)
 static int
 parse_link_speed(int n)
 {
-	uint32_t speed = ETH_LINK_SPEED_FIXED;
+	uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
 
 	switch (n) {
 	case 1000:
-		speed |= ETH_LINK_SPEED_1G;
+		speed |= RTE_ETH_LINK_SPEED_1G;
 		break;
 	case 10000:
-		speed |= ETH_LINK_SPEED_10G;
+		speed |= RTE_ETH_LINK_SPEED_10G;
 		break;
 	case 25000:
-		speed |= ETH_LINK_SPEED_25G;
+		speed |= RTE_ETH_LINK_SPEED_25G;
 		break;
 	case 40000:
-		speed |= ETH_LINK_SPEED_40G;
+		speed |= RTE_ETH_LINK_SPEED_40G;
 		break;
 	case 50000:
-		speed |= ETH_LINK_SPEED_50G;
+		speed |= RTE_ETH_LINK_SPEED_50G;
 		break;
 	case 100000:
-		speed |= ETH_LINK_SPEED_100G;
+		speed |= RTE_ETH_LINK_SPEED_100G;
 		break;
 	case 200000:
-		speed |= ETH_LINK_SPEED_200G;
+		speed |= RTE_ETH_LINK_SPEED_200G;
 		break;
 	case 100:
 	case 10:
@@ -912,13 +912,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
 				if (!strcmp(optarg, "64K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_64K;
+						RTE_ETH_FDIR_PBALLOC_64K;
 				else if (!strcmp(optarg, "128K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_128K;
+						RTE_ETH_FDIR_PBALLOC_128K;
 				else if (!strcmp(optarg, "256K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_256K;
+						RTE_ETH_FDIR_PBALLOC_256K;
 				else
 					rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
 						 " must be: 64K or 128K or 256K\n",
@@ -960,34 +960,34 @@ launch_args_parse(int argc, char** argv)
 			}
 #endif
 			if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 			if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
-				rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 			if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
-				rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 			if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
-				rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-rx-timestamp"))
-				rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 			if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-filter"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-extend"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-qinq-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
 				rx_drop_en = 1;
@@ -1009,13 +1009,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
 				set_pkt_forwarding_mode(optarg);
 			if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
-				rss_hf = ETH_RSS_IP;
+				rss_hf = RTE_ETH_RSS_IP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
-				rss_hf = ETH_RSS_UDP;
+				rss_hf = RTE_ETH_RSS_UDP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
-				rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
-				rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rxq")) {
 				n = atoi(optarg);
 				if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@@ -1386,12 +1386,12 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
 				char *end = NULL;
 				n = strtoul(optarg, &end, 16);
-				if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+				if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
 					rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
 				else
 					rte_exit(EXIT_FAILURE,
 						 "rx-mq-mode must be >= 0 and <= %d\n",
-						 ETH_MQ_RX_VMDQ_DCB_RSS);
+						 RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
 			}
 			if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
 				record_core_cycles = 1;
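
parse_link_speed() above accumulates RTE_ETH_LINK_SPEED_* bits into the mask that eventually lands in rte_eth_conf.link_speeds. For reference, forcing a fixed speed with the renamed flags might look like the sketch below; whether a fixed speed is honoured is driver dependent:

#include <rte_ethdev.h>

/* Request a fixed 10G link instead of autonegotiation.
 * RTE_ETH_LINK_SPEED_AUTONEG (0) remains the default when nothing is set. */
static void
force_fixed_10g(struct rte_eth_conf *conf)
{
        conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
}
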
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 6cbe9ba3c893..30bf897d6da8 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -337,7 +337,7 @@ uint64_t noisy_lkup_num_reads_writes;
 /*
  * Receive Side Scaling (RSS) configuration.
  */
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
 
 /*
  * Port topology configuration
@@ -454,12 +454,12 @@ struct rte_eth_rxmode rx_mode = {
 };
 
 struct rte_eth_txmode tx_mode = {
-	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 };
 
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
 	.mode = RTE_FDIR_MODE_NONE,
-	.pballoc = RTE_FDIR_PBALLOC_64K,
+	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 	.status = RTE_FDIR_REPORT_STATUS,
 	.mask = {
 		.vlan_tci_mask = 0xFFEF,
@@ -513,7 +513,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 /*
  * hexadecimal bitmask of RX mq mode can be enabled.
  */
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
 
 /*
  * Used to set forced link speed
@@ -1437,9 +1437,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
 			"Updating jumbo frame offload failed for port %u\n",
 			pid);
 
-	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		port->dev_conf.txmode.offloads &=
-			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Apply Rx offloads configuration */
 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
@@ -1566,8 +1566,8 @@ init_config(void)
 
 	init_port_config();
 
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
 	/*
 	 * Records which Mbuf pool to use by each logical core, if needed.
 	 */
@@ -3154,7 +3154,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3414,17 +3414,17 @@ update_jumbo_frame_offload(portid_t portid)
 		port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
 
 	if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
-		rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		on = false;
 	} else {
-		if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+		if ((port->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0) {
 			fprintf(stderr,
 				"Frame size (%u) is not supported by port %u\n",
 				port->dev_conf.rxmode.max_rx_pkt_len,
 				portid);
 			return -1;
 		}
-		rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		on = true;
 	}
 
@@ -3436,16 +3436,16 @@ update_jumbo_frame_offload(portid_t portid)
 		/* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
 		for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
 			if (on)
-				port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+				port->rx_conf[qid].offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			else
-				port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				port->rx_conf[qid].offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		}
 	}
 
 	/* If JUMBO_FRAME is set MTU conversion done by ethdev layer,
 	 * if unset do it here
 	 */
-	if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0) {
 		ret = rte_eth_dev_set_mtu(portid,
 				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
 		if (ret)
@@ -3486,9 +3486,9 @@ init_port_config(void)
 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
 				port->dev_conf.rxmode.mq_mode =
 					(enum rte_eth_rx_mq_mode)
-						(rx_mq_mode & ETH_MQ_RX_RSS);
+						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
 			else
-				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 		}
 
 		rxtx_port_config(port);
@@ -3575,9 +3575,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		vmdq_rx_conf->enable_default_pool = 0;
 		vmdq_rx_conf->default_pool = 0;
 		vmdq_rx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 		vmdq_tx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 
 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3585,7 +3585,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 			vmdq_rx_conf->pool_map[i].pools =
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
@@ -3593,8 +3593,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3610,23 +3610,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
 	}
 
 	if (pfc_en)
 		eth_conf->dcb_capability_en =
-				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
 	else
-		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
 
 	return 0;
 }
@@ -3653,7 +3653,7 @@ init_port_dcb_config(portid_t pid,
 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	/* re-configure the device . */
 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@@ -3703,7 +3703,7 @@ init_port_dcb_config(portid_t pid,
 
 	rxtx_port_config(rte_port);
 	/* VLAN filter */
-	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
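
update_jumbo_frame_offload() above carries the full logic; its core, reduced to a sketch where eth_overhead is assumed to be dev_info.max_rx_pktlen - dev_info.max_mtu:

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Enable or disable the jumbo-frame offload based on the requested MTU. */
static void
set_jumbo(struct rte_eth_conf *conf, uint16_t mtu, uint32_t eth_overhead)
{
        if (mtu > RTE_ETHER_MTU) {
                conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
                conf->rxmode.max_rx_pkt_len = mtu + eth_overhead;
        } else {
                conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
        }
}
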
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 16a3598e48c5..e4ad8a6a7cff 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -446,7 +446,7 @@ extern lcoreid_t bitrate_lcore_id;
 extern uint8_t bitrate_enabled;
 #endif
 
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
 
 /*
  * Configuration of packet segments used to scatter received packets
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index aed820f5d340..5409d7a0deb0 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -352,11 +352,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	tx_offloads = txp->dev_conf.txmode.offloads;
 	vlan_tci = txp->tx_vlan_id;
 	vlan_tci_outer = txp->tx_vlan_id_outer;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	/*
diff --git a/app/test/test_ethdev_link.c b/app/test/test_ethdev_link.c
index ee11987bae28..6248aea49abd 100644
--- a/app/test/test_ethdev_link.c
+++ b/app/test/test_ethdev_link.c
@@ -14,10 +14,10 @@ test_link_status_up_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -27,9 +27,9 @@ test_link_status_up_default(void)
 	TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
 		text, strlen(text), "Invalid default link status string");
 
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_FIXED;
-	link_status.link_speed = ETH_SPEED_NUM_10M,
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #2: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -37,7 +37,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -45,7 +45,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_NONE;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -54,9 +54,9 @@ test_link_status_up_default(void)
 		"string with HDX");
 
 	/* test max str len */
-	link_status.link_speed = ETH_SPEED_NUM_200G;
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_AUTONEG;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #4:len = %d, %s\n", ret, text);
 	RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@@ -69,10 +69,10 @@ test_link_status_down_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -90,9 +90,9 @@ test_link_status_invalid(void)
 	int ret = 0;
 	struct rte_eth_link link_status = {
 		.link_speed = 55555,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -116,21 +116,21 @@ test_link_speed_all_values(void)
 		const char *value;
 		uint32_t link_speed;
 	} speed_str_map[] = {
-		{ "None",   ETH_SPEED_NUM_NONE },
-		{ "10 Mbps",  ETH_SPEED_NUM_10M },
-		{ "100 Mbps", ETH_SPEED_NUM_100M },
-		{ "1 Gbps",   ETH_SPEED_NUM_1G },
-		{ "2.5 Gbps", ETH_SPEED_NUM_2_5G },
-		{ "5 Gbps",   ETH_SPEED_NUM_5G },
-		{ "10 Gbps",  ETH_SPEED_NUM_10G },
-		{ "20 Gbps",  ETH_SPEED_NUM_20G },
-		{ "25 Gbps",  ETH_SPEED_NUM_25G },
-		{ "40 Gbps",  ETH_SPEED_NUM_40G },
-		{ "50 Gbps",  ETH_SPEED_NUM_50G },
-		{ "56 Gbps",  ETH_SPEED_NUM_56G },
-		{ "100 Gbps", ETH_SPEED_NUM_100G },
-		{ "200 Gbps", ETH_SPEED_NUM_200G },
-		{ "Unknown",  ETH_SPEED_NUM_UNKNOWN },
+		{ "None",   RTE_ETH_SPEED_NUM_NONE },
+		{ "10 Mbps",  RTE_ETH_SPEED_NUM_10M },
+		{ "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+		{ "1 Gbps",   RTE_ETH_SPEED_NUM_1G },
+		{ "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+		{ "5 Gbps",   RTE_ETH_SPEED_NUM_5G },
+		{ "10 Gbps",  RTE_ETH_SPEED_NUM_10G },
+		{ "20 Gbps",  RTE_ETH_SPEED_NUM_20G },
+		{ "25 Gbps",  RTE_ETH_SPEED_NUM_25G },
+		{ "40 Gbps",  RTE_ETH_SPEED_NUM_40G },
+		{ "50 Gbps",  RTE_ETH_SPEED_NUM_50G },
+		{ "56 Gbps",  RTE_ETH_SPEED_NUM_56G },
+		{ "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+		{ "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+		{ "Unknown",  RTE_ETH_SPEED_NUM_UNKNOWN },
 		{ "Invalid",   50505 }
 	};
 
diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 9198767b4194..bb7917010d62 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -106,7 +106,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 		.intr_conf = {
 			.rxq = 1,
@@ -121,7 +121,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 	};
 
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index 96733554b6c4..40ab0d5c4ca4 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
 
 static const struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 8a5c8310a8b4..23c024aa1b0c 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -134,12 +134,12 @@ static uint16_t vlan_id = 0x100;
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 2c835fa7adc7..1556f14d6921 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -107,12 +107,12 @@ static struct link_bonding_unittest_params test_params  = {
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index 5dac60ca1edd..cdf1c4fd259d 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -52,7 +52,7 @@ struct slave_conf {
 
 	struct rte_eth_rss_conf rss_conf;
 	uint8_t rss_key[40];
-	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t is_slave;
 	struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
@@ -61,7 +61,7 @@ struct slave_conf {
 struct link_bonding_rssconf_unittest_params {
 	uint8_t bond_port_id;
 	struct rte_eth_dev_info bond_dev_info;
-	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 	struct slave_conf slave_ports[SLAVE_COUNT];
 
 	struct rte_mempool *mbuf_pool;
@@ -80,29 +80,29 @@ static struct link_bonding_rssconf_unittest_params test_params  = {
  */
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
 static struct rte_eth_conf rss_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IPV6,
+			.rss_hf = RTE_ETH_RSS_IPV6,
 		},
 	},
 	.lpbk_mode = 0,
@@ -209,13 +209,13 @@ bond_slaves(void)
 static int
 reta_set(uint16_t port_id, uint8_t value, int reta_size)
 {
-	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
 	int i, j;
 
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
 		/* select all fields to set */
 		reta_conf[i].mask = ~0LL;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			reta_conf[i].reta[j] = value;
 	}
 
@@ -234,8 +234,8 @@ reta_check_synced(struct slave_conf *port)
 	for (i = 0; i < test_params.bond_dev_info.reta_size;
 			i++) {
 
-		int index = i / RTE_RETA_GROUP_SIZE;
-		int shift = i % RTE_RETA_GROUP_SIZE;
+		int index = i / RTE_ETH_RETA_GROUP_SIZE;
+		int shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (port->reta_conf[index].reta[shift] !=
 				test_params.bond_reta_conf[index].reta[shift])
@@ -253,7 +253,7 @@ static int
 bond_reta_fetch(void) {
 	unsigned j;
 
-	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
+	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
 			j++)
 		test_params.bond_reta_conf[j].mask = ~0LL;
 
@@ -270,7 +270,7 @@ static int
 slave_reta_fetch(struct slave_conf *port) {
 	unsigned j;
 
-	for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
+	for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
 		port->reta_conf[j].mask = ~0LL;
 
 	TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
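
reta_set() above is already close to production shape; a self-contained variant that spreads entries round-robin across nb_queues, assuming reta_size is a multiple of RTE_ETH_RETA_GROUP_SIZE and no larger than 512:

#include <rte_ethdev.h>

/* Fill the redirection table round-robin across nb_queues Rx queues. */
static int
reta_spread(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
        unsigned int i, j;

        for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
                reta_conf[i].mask = ~0ULL;   /* update every entry in the group */
                for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
                        reta_conf[i].reta[j] =
                                (i * RTE_ETH_RETA_GROUP_SIZE + j) % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
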
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 3a248d512c4a..da7b7ad1f7cc 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -62,12 +62,12 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 1,  /* enable loopback */
 };
@@ -156,7 +156,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -823,7 +823,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		/* bulk alloc rx, full-featured tx */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "hybrid")) {
 		/* bulk alloc rx, vector tx
@@ -832,13 +832,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		 */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "full")) {
 		/* full feature rx,tx pair */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		return 0;
 	}
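
Most of the test configs in this patch differ only in mq_mode and rss_hf; for reference, a minimal RSS-enabled rte_eth_conf under the new names (a NULL rss_key selects the driver's default key):

#include <rte_ethdev.h>

/* Minimal RSS configuration hashing on IP and UDP headers. */
static const struct rte_eth_conf rss_port_conf = {
        .rxmode = {
                .mq_mode = RTE_ETH_MQ_RX_RSS,
        },
        .rx_adv_conf = {
                .rss_conf = {
                        .rss_key = NULL,   /* driver default key */
                        .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
                },
        },
};
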
 
diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7036f401ed95..6eecfa385537 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -53,7 +53,7 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
 		rte_pktmbuf_free(pkt);
@@ -178,7 +178,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
 		int wait_to_complete __rte_unused)
 {
 	if (!bonded_eth_dev->data->dev_started)
-		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -574,9 +574,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 	eth_dev->data->nb_rx_queues = (uint16_t)1;
 	eth_dev->data->nb_tx_queues = (uint16_t)1;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL)
diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 53560d3830d7..1c0ea988f239 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue configuration.
 - HW managed event vectorization on CN10K for packets enqueued from ethdev to
diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index 11fbebfcd243..0fa57abfa3e0 100644
--- a/doc/guides/eventdevs/octeontx2.rst
+++ b/doc/guides/eventdevs/octeontx2.rst
@@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue config.
 
diff --git a/doc/guides/howto/debug_troubleshoot.rst b/doc/guides/howto/debug_troubleshoot.rst
index 457ac441429a..13f30e39363e 100644
--- a/doc/guides/howto/debug_troubleshoot.rst
+++ b/doc/guides/howto/debug_troubleshoot.rst
@@ -71,7 +71,7 @@ RX Port and associated core :numref:`dtg_rx_rate`.
    * Identify if port Speed and Duplex is matching to desired values with
      ``rte_eth_link_get``.
 
-   * Check ``DEV_RX_OFFLOAD_JUMBO_FRAME`` is set with ``rte_eth_dev_info_get``.
+   * Check ``RTE_ETH_RX_OFFLOAD_JUMBO_FRAME`` is set with ``rte_eth_dev_info_get``.
 
    * Check promiscuous mode if the drops do not occur for unique MAC address
      with ``rte_eth_promiscuous_get``.
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index e75f4fa9e3bc..77827e750195 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -877,22 +877,22 @@ processing. This improved performance is derived from a number of optimizations:
     * TX: only the following reduced set of transmit offloads is supported in
       vector mode::
 
-       DEV_TX_OFFLOAD_MBUF_FAST_FREE
+       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 
     * RX: only the following reduced set of receive offloads is supported in
       vector mode (note that jumbo MTU is allowed only when the MTU setting
-      does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
-       DEV_RX_OFFLOAD_VLAN_STRIP
-       DEV_RX_OFFLOAD_KEEP_CRC
-       DEV_RX_OFFLOAD_JUMBO_FRAME
-       DEV_RX_OFFLOAD_IPV4_CKSUM
-       DEV_RX_OFFLOAD_UDP_CKSUM
-       DEV_RX_OFFLOAD_TCP_CKSUM
-       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-       DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-       DEV_RX_OFFLOAD_RSS_HASH
-       DEV_RX_OFFLOAD_VLAN_FILTER
+      does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+       RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+       RTE_ETH_RX_OFFLOAD_KEEP_CRC
+       RTE_ETH_RX_OFFLOAD_JUMBO_FRAME
+       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_RSS_HASH
+       RTE_ETH_RX_OFFLOAD_VLAN_FILTER
 
 The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
 vector processing is made at run-time when the port is started; if no transmit
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 91bdcd065a95..0209730b904a 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -432,7 +432,7 @@ Limitations
 .. code-block:: console
 
      vlan_offload = rte_eth_dev_get_vlan_offload(port);
-     vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+     vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
 Another alternative is modify the adapter's ingress VLAN rewrite mode so that
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index a96e12d15515..7f7d6ae45658 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -30,7 +30,7 @@ Speed capabilities
 
 Supports getting the speed capabilities that the current device is capable of.
 
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
 * **[related]  API**: ``rte_eth_dev_info_get()``.
 
 
@@ -101,11 +101,11 @@ Supports Rx interrupts.
 Lock-free Tx queue
 ------------------
 
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
 invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
 
-* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
 * **[related]  API**: ``rte_eth_tx_burst()``.
 
 
@@ -117,8 +117,8 @@ Fast mbuf free
 Supports optimization for fast release of mbufs following successful Tx.
 Requires that per queue, all mbufs come from the same mempool and has refcnt = 1.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
 
 
 .. _nic_features_free_tx_mbuf_on_demand:
@@ -165,7 +165,7 @@ Jumbo frame
 
 Supports Rx jumbo frames.
 
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_JUMBO_FRAME``.
   ``dev_conf.rxmode.max_rx_pkt_len``.
 * **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
 * **[related] API**: ``rte_eth_dev_set_mtu()``.
@@ -178,7 +178,7 @@ Scattered Rx
 
 Supports receiving segmented mbufs.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
 * **[implements] datapath**: ``Scattered Rx function``.
 * **[implements] rte_eth_dev_data**: ``scattered_rx``.
 * **[provides]   eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -206,12 +206,12 @@ LRO
 
 Supports Large Receive Offload.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
   ``dev_conf.rxmode.max_lro_pkt_size``.
 * **[implements] datapath**: ``LRO functionality``.
 * **[implements] rte_eth_dev_data**: ``lro``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
 * **[provides]   rte_eth_dev_info**: ``max_lro_pkt_size``.
 
 
@@ -222,12 +222,12 @@ TSO
 
 Supports TCP Segmentation Offloading.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
 * **[uses]       rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
 * **[uses]       mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
 * **[implements] datapath**: ``TSO functionality``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
 
 
 .. _nic_features_promiscuous_mode:
@@ -288,9 +288,9 @@ RSS hash
 
 Supports RSS hashing on RX.
 
-* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
 * **[uses]     user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
@@ -303,7 +303,7 @@ Inner RSS
 Supports RX RSS hashing on Inner headers.
 
 * **[uses]    rte_flow_action_rss**: ``level``.
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
 
@@ -340,7 +340,7 @@ VMDq
 
 Supports Virtual Machine Device Queues (VMDq).
 
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
 * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -363,7 +363,7 @@ DCB
 
 Supports Data Center Bridging (DCB).
 
-* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
 * **[uses]       user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -379,7 +379,7 @@ VLAN filter
 
 Supports filtering of a VLAN Tag identifier.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
 * **[implements] eth_dev_ops**: ``vlan_filter_set``.
 * **[related]    API**: ``rte_eth_dev_vlan_filter()``.
 
@@ -428,12 +428,12 @@ Supports inline crypto processing defined by rte_security library to perform cry
 operations of the security protocol while the packet is received in the NIC. The NIC is not aware
 of the protocol operations. See the Security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@@ -449,13 +449,13 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
 packet is received at the NIC. The NIC is capable of understanding the security
 protocol operations. See security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
   ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@@ -469,7 +469,7 @@ CRC offload
 Supports CRC stripping by hardware.
 A PMD is assumed to support CRC stripping by default. A PMD should advertise whether it supports keeping the CRC.
 
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
 
 
 .. _nic_features_vlan_offload:
@@ -479,13 +479,13 @@ VLAN offload
 
 Supports VLAN offload to hardware.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
 * **[implements] eth_dev_ops**: ``vlan_offload_set``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[related]    API**: ``rte_eth_dev_set_vlan_offload()``,
   ``rte_eth_dev_get_vlan_offload()``.
 
@@ -497,14 +497,14 @@ QinQ offload
 
 Supports QinQ (queue in queue) offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
   ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
   ``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 
 
 .. _nic_features_fec:
@@ -518,7 +518,7 @@ information to correct the bit errors generated during data packet transmission
 improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
 
 * **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides]   rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides]   rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
 * **[related]    API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
 
 
@@ -529,16 +529,16 @@ L3 checksum offload
 
 Supports L3 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
 * **[uses]     mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
   ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
   ``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 
 
 .. _nic_features_l4_checksum_offload:
@@ -548,8 +548,8 @@ L4 checksum offload
 
 Supports L4 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
   ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -557,8 +557,8 @@ Supports L4 checksum offload.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
   ``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 
 .. _nic_features_hw_timestamp:
 
@@ -567,10 +567,10 @@ Timestamp offload
 
 Supports Timestamp.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[related] eth_dev_ops**: ``read_clock``.
 
 .. _nic_features_macsec_offload:
@@ -580,11 +580,11 @@ MACsec offload
 
 Supports MACsec.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 
 
 .. _nic_features_inner_l3_checksum:
@@ -594,16 +594,16 @@ Inner L3 checksum
 
 Supports inner packet L3 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 
 
 .. _nic_features_inner_l4_checksum:
@@ -613,15 +613,15 @@ Inner L4 checksum
 
 Supports inner packet L4 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
   ``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 
 
 .. _nic_features_packet_type_parsing:
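
For illustration (not part of this patch), a minimal C sketch of probing one
of the renamed Tx offload bits before requesting it; port_id is assumed to be
a valid, already-probed port and error handling is trimmed:

    #include <rte_ethdev.h>

    static int
    enable_fast_free(uint16_t port_id)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf = { 0 };
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;

        /* Request the offload only if the PMD advertises it. */
        if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
            conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
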
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index 7b8ef0e7823d..3dff65d89b6d 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
 To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
 will be checked:
 
-*   ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+*   ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
 
-*   ``DEV_RX_OFFLOAD_CHECKSUM``
+*   ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
 
-*   ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+*   ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
 
 *   ``fdir_conf->mode``
 
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index fcea8151bf3c..e60e3b2a761d 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -222,21 +222,21 @@ For example,
     *   If the max number of VFs (max_vfs) is set in the range of 1 to 32:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are a total of 32
-        pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
 
         If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are a total of 32
-        pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
 
     *   If the max number of VFs (max_vfs) is in the range of 33 to 64:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected
         as ``rxq`` is not correct in this case;
 
-        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+        If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are a total of 64 pools (RTE_ETH_64_POOLS),
         and each VF has 2 Rx queues;
 
-    On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
-    or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+    On the host, to enable VF RSS functionality, the Rx mq mode should be set to RTE_ETH_MQ_RX_VMDQ_RSS
+    or RTE_ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
     VF RSS information such as the hash function, RSS key and RSS key length also needs to be configured.
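
As a sketch (not from the patch; the key and queue counts are placeholders),
the host-side configuration with the renamed enum values could look like:

    #include <rte_ethdev.h>

    /* Combined VMDq + RSS Rx multi-queue mode for VF RSS. */
    static struct rte_eth_conf port_conf = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS,
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL,          /* use the PMD default key */
                .rss_hf = RTE_ETH_RSS_IP, /* hash on IP fields */
            },
        },
    };
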
 
 .. note::
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index b82e63438285..24fbccc982f5 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -69,13 +69,13 @@ Other features are supported using optional MACRO configuration. They include:
 
 To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
 
-*   DEV_RX_OFFLOAD_VLAN_STRIP
+*   RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 
-*   DEV_RX_OFFLOAD_VLAN_EXTEND
+*   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
 
-*   DEV_RX_OFFLOAD_CHECKSUM
+*   RTE_ETH_RX_OFFLOAD_CHECKSUM
 
-*   DEV_RX_OFFLOAD_HEADER_SPLIT
+*   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
 
 *   dev_conf
 
@@ -143,13 +143,13 @@ l3fwd
 ~~~~~
 
 When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
 Otherwise, by default, RX vPMD is disabled.
 
 load_balancer
 ~~~~~~~~~~~~~
 
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
 In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
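
A one-line C sketch (``port_conf`` is assumed to be the configuration later
passed to ``rte_eth_dev_configure()``) of keeping the ixgbe Rx vector path
enabled by not requesting the checksum offloads:

    /* RTE_ETH_RX_OFFLOAD_CHECKSUM is a composite of the IPv4/UDP/TCP
     * checksum bits; leaving them all unset keeps Rx vPMD enabled. */
    port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_CHECKSUM;
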
 
 
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index bae73f42d882..6facb68b9545 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -371,7 +371,7 @@ Limitations
 
 - CRC:
 
-  - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+  - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
@@ -607,7 +607,7 @@ Driver options
   small-packet traffic.
 
   When MPRQ is enabled, max_rx_pkt_len can be larger than the size of
-  user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+  user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. The PMD will
   configure a stride size large enough to accommodate max_rx_pkt_len as long as
   the device allows. Note that this can waste system memory compared to enabling Rx
   scatter and multi-segment packet.
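
For reference, a hypothetical snippet (``port_conf`` is an application-owned
``rte_eth_conf``) that requests Rx scatter explicitly with the renamed flag
instead of relying on MPRQ stride sizing:

    /* Let the PMD chain mbufs when a packet exceeds the Rx buffer size. */
    port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
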
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 3ce696b605d1..681010d9ed7d 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -275,7 +275,7 @@ An example utility for eBPF instruction generation in the format of C arrays wil
 be added in future releases.
 
 TAP reports the supported RSS hash functions as part of the dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
 **Known limitation:** TAP supports all of the above hash functions together
 and not in partial combinations.
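
A minimal sketch (not part of the patch) of requesting all three hash types
TAP reports, using the renamed group macros (each expands to the matching
IPv4 and IPv6 type bits):

    struct rte_eth_rss_conf rss_conf = {
        .rss_key = NULL, /* keep the PMD default key */
        .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP,
    };
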
 
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 7bff0aef0b74..9b2c31a2f0bc 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
 
    - the bit mask of required GSO types. The GSO library uses the same macros as
      those that describe a physical device's TX offloading capabilities (i.e.
-     ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+     ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
      wants to segment TCP/IPv4 packets, it should set gso_types to
-     ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
-     supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
-     ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+     ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+     supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+     ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
      allowed.
 
    - a flag that indicates whether the IPv4 headers of output segments should
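
As an aside, a sketch (assuming ``direct_pool`` and ``indirect_pool`` are
pre-created mempools) of a GSO context that segments TCP/IPv4 packets with
the renamed macro:

    #include <rte_gso.h>

    struct rte_gso_ctx gso_ctx = {
        .direct_pool = direct_pool,
        .indirect_pool = indirect_pool,
        .flag = 0,
        .gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO,
        .gso_size = 1400, /* maximum output segment size, headers included */
    };
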
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 2f190b40e43a..dc6186a44ae2 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
     mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM
     set out_ip checksum to 0 in the packet
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
 
 - calculate checksum of out_ip and out_udp::
 
@@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
     set out_ip checksum to 0 in the packet
     set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
-  and DEV_TX_OFFLOAD_UDP_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+  and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
 
 - calculate checksum of in_ip::
 
@@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
 
   This is similar to case 1), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of in_ip and in_tcp::
@@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
   This is similar to case 2), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
-  DEV_TX_OFFLOAD_TCP_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+  RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - segment inner TCP::
@@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header without including the IP
       payload length using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of out_ip, in_ip, in_tcp::
@@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
-  DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+  RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
 
 The list of flags and their precise meaning is described in the mbuf API
 documentation (rte_mbuf.h). Also refer to the testpmd source code
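
To make the first case above concrete, a hedged C sketch (``m`` and
``out_ip`` are assumed to point at the mbuf and its outer IPv4 header) of
offloading just the outer IPv4 checksum:

    /* Requires hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM. */
    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    out_ip->hdr_checksum = 0; /* hardware fills in the checksum */
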
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 0d4ac77a7ccf..68312898448c 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
 
 Avoiding lock contention is a key issue in a multi-core environment.
 To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
 In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
 
 To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
 
 Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
 
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
 concurrently on the same Tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
 
 *  Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
@@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
 *  In the eventdev use case, avoid dedicating a separate TX core for transmitting and thus
    enable better scaling, as all workers can send the packets.
 
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
 
 Device Identification, Ownership and Configuration
 --------------------------------------------------
@@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
 The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
 Any requested offloading by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
 ``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
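
A short sketch (not from the patch; ``dev_info`` is assumed to be filled by
``rte_eth_dev_info_get()`` and ``port_conf`` to be application-owned) of the
capability check described above, with the renamed flags:

    uint64_t req_rx = RTE_ETH_RX_OFFLOAD_CHECKSUM |
                      RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

    /* Refuse offloads the device does not advertise. */
    if ((dev_info.rx_offload_capa & req_rx) != req_rx)
        rte_exit(EXIT_FAILURE, "requested Rx offloads not supported\n");
    port_conf.rxmode.offloads = req_rx;
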
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 2b42d5ec8c05..1bac8f04b96e 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1835,23 +1835,23 @@ only matching traffic goes through.
 
 .. table:: RSS
 
-   +---------------+---------------------------------------------+
-   | Field         | Value                                       |
-   +===============+=============================================+
-   | ``func``      | RSS hash function to apply                  |
-   +---------------+---------------------------------------------+
-   | ``level``     | encapsulation level for ``types``           |
-   +---------------+---------------------------------------------+
-   | ``types``     | specific RSS hash types (see ``ETH_RSS_*``) |
-   +---------------+---------------------------------------------+
-   | ``key_len``   | hash key length in bytes                    |
-   +---------------+---------------------------------------------+
-   | ``queue_num`` | number of entries in ``queue``              |
-   +---------------+---------------------------------------------+
-   | ``key``       | hash key                                    |
-   +---------------+---------------------------------------------+
-   | ``queue``     | queue indices to use                        |
-   +---------------+---------------------------------------------+
+   +---------------+-------------------------------------------------+
+   | Field         | Value                                           |
+   +===============+=================================================+
+   | ``func``      | RSS hash function to apply                      |
+   +---------------+-------------------------------------------------+
+   | ``level``     | encapsulation level for ``types``               |
+   +---------------+-------------------------------------------------+
+   | ``types``     | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+   +---------------+-------------------------------------------------+
+   | ``key_len``   | hash key length in bytes                        |
+   +---------------+-------------------------------------------------+
+   | ``queue_num`` | number of entries in ``queue``                  |
+   +---------------+-------------------------------------------------+
+   | ``key``       | hash key                                        |
+   +---------------+-------------------------------------------------+
+   | ``queue``     | queue indices to use                            |
+   +---------------+-------------------------------------------------+
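
For illustration, a sketch of an RSS action filled in with the renamed
``RTE_ETH_RSS_*`` types (``queues[]`` and ``key[]`` are hypothetical,
application-owned arrays):

    struct rte_flow_action_rss rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0, /* hash on the outermost headers */
        .types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        .key_len = sizeof(key),
        .queue_num = RTE_DIM(queues),
        .key = key,
        .queue = queues,
    };
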
 
 Action: ``PF``
 ^^^^^^^^^^^^^^
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index f72bc8a78fa6..e3bd451917f0 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -560,7 +560,7 @@ created by the application is attached to the security session by the API
 
 For Inline Crypto and Inline protocol offload, device-specific metadata is
 updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
 
 For inline protocol offloaded ingress traffic, the application can register a
 pointer, ``userdata`` , in the security session. When the packet is received,
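
A hedged sketch of the metadata step (``sec_ctx``, ``sess`` and ``m`` are
assumed to exist; attach metadata only when the renamed capability flag
requires it):

    if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA)
        rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
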
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 76a4abfd6b0b..20159a1c9a90 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -58,22 +58,16 @@ Deprecation Notices
   ``RTE_ETH_FLOW_MAX`` is one sample of the mentioned case, adding a new flow
   type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
   usage in following public struct hierarchy:
-  ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+  ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
   Need to identify this kind of usages and fix in 20.11, otherwise this blocks
   us extending existing enum/define.
   One solution can be using a fixed size array instead of ``.*MAX.*`` value.
 
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
-  Macros will be added for backward compatibility.
-  Backward compatibility macros will be removed on v22.11.
-  A few old backward compatibility macros from 2013 that does not have
-  proper prefix will be removed on v21.11.
-
 * ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
   and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
   will be removed in DPDK 20.11.
 
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
   This will allow application to enable or disable PMDs from updating
   ``rte_mbuf::hash::fdir``.
   This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
@@ -98,7 +92,7 @@ Deprecation Notices
   either by ``rte_eth_dev_configure()`` or ``rte_eth_dev_set_mtu()``.
 
  An application may need to configure the device for a specific Rx packet size, e.g. for
+  cases where ``RTE_ETH_RX_OFFLOAD_SCATTER`` is not supported and the received packet size
  can't be bigger than the Rx buffer size.
   can't be bigger than Rx buffer size.
   To cover these cases an application needs to know the device packet overhead to be
   able to calculate the ``mtu`` corresponding to a Rx buffer size, for this
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a554efaf..daff4de36a76 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -100,6 +100,9 @@ ABI Changes
    Also, make sure to start the actual text at the margin.
    =======================================================
 
+* ethdev: All enums and macros were updated to have the ``RTE_ETH`` prefix, and
+  structures were updated to have the ``rte_eth`` prefix. DPDK components were updated to use the new names.
+
 
 Known Issues
 ------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 78171b25f96e..782574dd39d5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -209,12 +209,12 @@ Where:
     device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
 
 *   ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
     allows the user to disable some of the RX HW offload capabilities.
     By default all HW RX offloads are enabled.
 
 *   ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
     allows user to disable some of the TX HW offload capabilities.
     By default all HW TX offloads are enabled.
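
A hypothetical snippet (not from the sample app) for deriving one such hex
MASK from the renamed macros, here keeping only checksum and security Rx
offloads enabled:

    #include <inttypes.h>
    #include <stdio.h>

    uint64_t rx_mask = RTE_ETH_RX_OFFLOAD_CHECKSUM |
                       RTE_ETH_RX_OFFLOAD_SECURITY;
    printf("--rxoffload 0x%" PRIx64 "\n", rx_mask);
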
 
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 6061674239f4..d7f5951d4639 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -526,7 +526,7 @@ The command line options are:
     Set the hexadecimal bitmask of RX multi queue mode which can be enabled.
     The default value is 0x7::
 
-       ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+       RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
 
 *   ``--record-core-cycles``
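
For reference, the default ``--rx-mq-mode`` bitmask (0x7) expressed with the
renamed flags, as a sketch:

    uint32_t rx_mq_mode = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
                          RTE_ETH_MQ_RX_VMDQ_FLAG;
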
 
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
index be52e6f72dab..a922988607ef 100644
--- a/drivers/bus/dpaa/include/process.h
+++ b/drivers/bus/dpaa/include/process.h
@@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
 struct usdpaa_ioctl_link_status_args_old {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
-	/* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+	/* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
 	int     link_autoneg;
 
 };
@@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
 struct usdpaa_ioctl_update_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_update_link_speed {
 	/* network device node name*/
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
 };
 
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index bab25fd72eee..360bf75d3861 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -153,7 +153,7 @@ enum roc_npc_rss_hash_function {
 struct roc_npc_action_rss {
 	enum roc_npc_rss_hash_function func;
 	uint32_t level;
-	uint64_t types;	       /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types;	       /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len;      /**< Hash key length in bytes. */
 	uint32_t queue_num;    /**< Number of entries in @p queue. */
 	const uint8_t *key;    /**< Hash key. */
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index b73b211fd249..fb5d549e6227 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -91,10 +91,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@@ -265,7 +265,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -295,7 +295,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		internals->tx_queue[i].sockfd = -1;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -316,8 +316,8 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
 	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	return 0;
 }
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 74ffa4511284..dbf745852716 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -163,10 +163,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -654,7 +654,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -663,7 +663,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 377299b14c7a..b618cba3f023 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
 		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
 
 	/* ARK PMD supports all line rates, how do we indicate that here ?? */
-	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
-				ETH_LINK_SPEED_10G |
-				ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G);
-
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+	dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+				RTE_ETH_LINK_SPEED_10G |
+				RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G);
+
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return 0;
 }
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 0ce35eb519e2..5af1cff3770e 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -154,21 +154,21 @@ static struct rte_pci_driver rte_atl_pmd = {
 	.remove = eth_atl_pci_remove,
 };
 
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
-			| DEV_RX_OFFLOAD_IPV4_CKSUM \
-			| DEV_RX_OFFLOAD_UDP_CKSUM \
-			| DEV_RX_OFFLOAD_TCP_CKSUM \
-			| DEV_RX_OFFLOAD_JUMBO_FRAME \
-			| DEV_RX_OFFLOAD_MACSEC_STRIP \
-			| DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
-			| DEV_TX_OFFLOAD_IPV4_CKSUM \
-			| DEV_TX_OFFLOAD_UDP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_TSO \
-			| DEV_TX_OFFLOAD_MACSEC_INSERT \
-			| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_JUMBO_FRAME \
+			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
+			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define SFP_EEPROM_SIZE 0x100
 
@@ -489,7 +489,7 @@ atl_dev_start(struct rte_eth_dev *dev)
 	/* set adapter started */
 	hw->adapter_stopped = 0;
 
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -656,18 +656,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
 	uint32_t speed_mask = 0;
 
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
 	} else {
-		if (link_speeds & ETH_LINK_SPEED_10G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed_mask |= AQ_NIC_RATE_10G;
-		if (link_speeds & ETH_LINK_SPEED_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed_mask |= AQ_NIC_RATE_5G;
-		if (link_speeds & ETH_LINK_SPEED_1G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed_mask |= AQ_NIC_RATE_1G;
-		if (link_speeds & ETH_LINK_SPEED_2_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed_mask |=  AQ_NIC_RATE_2G5;
-		if (link_speeds & ETH_LINK_SPEED_100M)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed_mask |= AQ_NIC_RATE_100M;
 	}
 
@@ -1128,10 +1128,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
-	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 
 	return 0;
 }
@@ -1176,10 +1176,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 	u32 fc = AQ_NIC_FC_OFF;
 	int err = 0;
 
-	link.link_status = ETH_LINK_DOWN;
+	link.link_status = RTE_ETH_LINK_DOWN;
 	link.link_speed = 0;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 	memset(&old, 0, sizeof(old));
 
 	/* load old link status */
@@ -1199,8 +1199,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 		return 0;
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = hw->aq_link_status.mbps;
 
 	rte_eth_linkstatus_set(dev, &link);
@@ -1334,7 +1334,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1533,13 +1533,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	hw->aq_fw_ops->get_flow_control(hw, &fc);
 
 	if (fc == AQ_NIC_FC_OFF)
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (fc & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (fc & AQ_NIC_FC_TX)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 
 	return 0;
 }
@@ -1554,13 +1554,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (hw->aq_fw_ops->set_flow_control == NULL)
 		return -ENOTSUP;
 
-	if (fc_conf->mode == RTE_FC_NONE)
+	if (fc_conf->mode == RTE_ETH_FC_NONE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
-	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
-	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
-	else if (fc_conf->mode == RTE_FC_FULL)
+	else if (fc_conf->mode == RTE_ETH_FC_FULL)
 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
 
 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1731,14 +1731,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
 
-	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
 
-	if (mask & ETH_VLAN_EXTEND_MASK)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
 		ret = -ENOTSUP;
 
 	return ret;
@@ -1754,10 +1754,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 	PMD_INIT_FUNC_TRACE();
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
 		break;
 	default:
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
index f547571b5c97..da993be35faa 100644
--- a/drivers/net/atlantic/atl_ethdev.h
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -11,15 +11,15 @@
 #include "hw_atl/hw_atl_utils.h"
 
 #define ATL_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define ATL_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct atl_adapter *)adapter)->hw)
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 7d367c9306ec..ddf110d6ce7e 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
 	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_IPV4_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
 	/* allocate memory for the software ring */
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 623fa5e5ff5b..e870ced7e992 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -2011,9 +2011,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	/* Setup required number of queues */
 	_avp_set_queue_counts(eth_dev);
 
-	mask = (ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+	mask = (RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 	ret = avp_vlan_offload_set(eth_dev, mask);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2153,8 +2153,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
-	link->link_speed = ETH_SPEED_NUM_10G;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_speed = RTE_ETH_SPEED_NUM_10G;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_status = !!(avp->flags & AVP_F_LINKUP);
 
 	return -1;
@@ -2204,8 +2204,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
 	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
 	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	}
 
 	return 0;
@@ -2218,9 +2218,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	uint64_t offloads = dev_conf->rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2229,13 +2229,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
 
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 786288a7b079..c0f033e06b15 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
 	pdata->rss_hf = rss_conf->rss_hf;
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 }
 
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 9cb4818af11f..f33b9245bcf9 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
 	struct axgbe_port *pdata =  dev->data->dev_private;
 	/* Checksum offload to hardware */
 	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_CHECKSUM;
+				RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return 0;
 }
 
@@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		pdata->rss_enable = 1;
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		pdata->rss_enable = 0;
 	else
 		return  -1;
@@ -383,7 +383,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 
 	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
 	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 				max_pkt_len > pdata->rx_buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -519,8 +519,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		pdata->rss_table[i] = reta_conf[idx].reta[shift];
@@ -550,8 +550,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		reta_conf[idx].reta[shift] = pdata->rss_table[i];
@@ -588,13 +588,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
 
-	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Set the RSS options */
@@ -763,7 +763,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
 	link.link_status = pdata->phy_link;
 	link.link_speed = pdata->phy_speed;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
 		PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1206,25 +1206,25 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
 	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
 	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
-	dev_info->speed_capa =  ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_JUMBO_FRAME	|
-		DEV_RX_OFFLOAD_SCATTER	  |
-		DEV_RX_OFFLOAD_KEEP_CRC;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
 		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1261,13 +1261,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	fc.autoneg = pdata->pause_autoneg;
 
 	if (pdata->rx_pause && pdata->tx_pause)
-		fc.mode = RTE_FC_FULL;
+		fc.mode = RTE_ETH_FC_FULL;
 	else if (pdata->rx_pause)
-		fc.mode = RTE_FC_RX_PAUSE;
+		fc.mode = RTE_ETH_FC_RX_PAUSE;
 	else if (pdata->tx_pause)
-		fc.mode = RTE_FC_TX_PAUSE;
+		fc.mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc.mode = RTE_FC_NONE;
+		fc.mode = RTE_ETH_FC_NONE;
 
 	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
 	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1297,13 +1297,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	AXGMAC_IOWRITE(pdata, reg, reg_val);
 	fc.mode = fc_conf->mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 	} else {
@@ -1385,15 +1385,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	fc.mode = pfc_conf->fc.mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1492,11 +1492,11 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 	if (frame_size > AXGBE_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		val = 1;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		val = 0;
 	}
 	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
@@ -1842,8 +1842,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+	case RTE_ETH_VLAN_TYPE_INNER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
 		if (qinq) {
 			if (tpid != 0x8100 && tpid != 0x88a8)
 				PMD_DRV_LOG(ERR,
@@ -1860,8 +1860,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    "Inner type not supported in single tag\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
 		if (qinq) {
 			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
 			/*Enable outer VLAN tag*/
@@ -1878,11 +1878,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 					    "tag supported 0x8100/0x88A8\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_MAX:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+	case RTE_ETH_VLAN_TYPE_MAX:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
 		break;
-	case ETH_VLAN_TYPE_UNKNOWN:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+	case RTE_ETH_VLAN_TYPE_UNKNOWN:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
 		break;
 	}
 	return 0;
@@ -1916,8 +1916,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_stripping(pdata);
@@ -1927,8 +1927,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_stripping(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_filtering(pdata);
@@ -1938,14 +1938,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_filtering(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
 			axgbe_vlan_extend_enable(pdata);
 			/* Set global registers with default ethertype*/
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					    RTE_ETHER_TYPE_VLAN);
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					    RTE_ETHER_TYPE_VLAN);
 		} else {
 			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index a6226729fe4d..0a3e1c59df1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -97,12 +97,12 @@
 
 /* Receive Side Scaling */
 #define AXGBE_RSS_OFFLOAD  ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define AXGBE_RSS_HASH_KEY_SIZE		40
 #define AXGBE_RSS_MAX_TABLE_SIZE	256
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
index 4f98e695ae74..59fa9175aded 100644
--- a/drivers/net/axgbe/axgbe_mdio.c
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
 		pdata->an_int = 0;
 		axgbe_an73_clear_interrupts(pdata);
 		pdata->eth_dev->data->dev_link.link_status =
-			ETH_LINK_DOWN;
+			RTE_ETH_LINK_DOWN;
 	} else if (pdata->an_state == AXGBE_AN_ERROR) {
 		PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
 			    cur_state);
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 33f709a6bb02..baa17a5fb43f 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		(DMA_CH_INC * rxq->queue_id));
 	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
 						  DMA_CH_RDTR_LO);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 463886f17a58..14d91f868cd8 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
 	link.link_speed = sc->link_vars.line_speed;
 	switch (sc->link_vars.duplex) {
 		case DUPLEX_FULL:
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			break;
 		case DUPLEX_HALF:
-			link.link_duplex = ETH_LINK_HALF_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 			break;
 	}
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	link.link_status = sc->link_vars.link_up;
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -181,7 +181,7 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE(sc);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 		dev->data->mtu = sc->mtu;
 	}
@@ -412,7 +412,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
 		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
 				"VF device is no longer operational");
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	}
 
 	return ret;
@@ -538,8 +538,8 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
 	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -675,7 +675,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 	bnx2x_load_firmware(sc);
 	assert(sc->firmware);
 
-	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		sc->udp_rss = 1;
 
 	sc->rx_budget = BNX2X_RX_BUDGET;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 494a1eff3700..7e313c2fb5af 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -569,40 +569,40 @@ struct bnxt_rep_info {
 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
 
 #define BNXT_ETH_RSS_SUPPORT (	\
-	ETH_RSS_IPV4 |		\
-	ETH_RSS_NONFRAG_IPV4_TCP |	\
-	ETH_RSS_NONFRAG_IPV4_UDP |	\
-	ETH_RSS_IPV6 |		\
-	ETH_RSS_NONFRAG_IPV6_TCP |	\
-	ETH_RSS_NONFRAG_IPV6_UDP |	\
-	ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
-				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_CKSUM | \
-				     DEV_TX_OFFLOAD_UDP_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_TSO | \
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_QINQ_INSERT | \
-				     DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
-				     DEV_RX_OFFLOAD_VLAN_STRIP | \
-				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_TCP_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
-				     DEV_RX_OFFLOAD_KEEP_CRC | \
-				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-				     DEV_RX_OFFLOAD_TCP_LRO | \
-				     DEV_RX_OFFLOAD_SCATTER | \
-				     DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RSS_IPV4 |		\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
+	RTE_ETH_RSS_IPV6 |		\
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
+	RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+				     RTE_ETH_RX_OFFLOAD_SCATTER | \
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index de34a2f0bb2d..3f3596f39f2f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 		goto err_out;
 
 	/* Alloc RSS context only if RSS mode is enabled */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
 
 		/* RSS table size in Thor is 512.
@@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	 * setting is not available at this time, it will not be
 	 * configured correctly in the CFA.
 	 */
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 
 	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
-				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
 				    true : false);
 	if (rc)
 		goto err_out;
@@ -738,11 +738,11 @@ static int bnxt_start_nic(struct bnxt *bp)
 
 	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags |= BNXT_FLAG_JUMBO;
 	} else {
 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags &= ~BNXT_FLAG_JUMBO;
 	}
 
@@ -908,35 +908,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
 		link_speed = bp->link_info->support_pam4_speeds;
 
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
-		speed_capa |= ETH_LINK_SPEED_2_5G;
+		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
-		speed_capa |= ETH_LINK_SPEED_20G;
+		speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	if (bp->link_info->auto_mode ==
 	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -980,8 +980,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
 				    dev_info->tx_queue_offload_capa;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
@@ -1030,8 +1030,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	 */
 
 	/* VMDq resources */
-	vpool = 64; /* ETH_64_POOLS */
-	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	vpool = 64; /* RTE_ETH_64_POOLS */
+	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
 	for (i = 0; i < 4; vpool >>= 1, i++) {
 		if (max_vnics > vpool) {
 			for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1126,18 +1126,18 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
 		goto resource_error;
 
-	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
 		goto resource_error;
 
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		eth_dev->data->mtu =
 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
 			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
@@ -1168,7 +1168,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
 			eth_dev->data->port_id,
 			(uint32_t)link->link_speed,
-			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			("full-duplex") : ("half-duplex\n"));
 	else
 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1184,10 +1184,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return 1;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		return 1;
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1232,16 +1232,16 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 	 * a limited subset have been enabled.
 	 */
 	if (eth_dev->data->dev_conf.rxmode.offloads &
-		~(DEV_RX_OFFLOAD_VLAN_STRIP |
-		  DEV_RX_OFFLOAD_KEEP_CRC |
-		  DEV_RX_OFFLOAD_JUMBO_FRAME |
-		  DEV_RX_OFFLOAD_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_TCP_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_RSS_HASH |
-		  DEV_RX_OFFLOAD_VLAN_FILTER))
+		~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		  RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		  RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
 		goto use_scalar_rx;
 
 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1293,7 +1293,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 	 * or tx offloads.
 	 */
 	if (eth_dev->data->scattered_rx ||
-	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+	    (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
 	    BNXT_TRUFLOW_EN(bp))
 		goto use_scalar_tx;
 
@@ -1594,10 +1594,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
 	bnxt_link_update_op(eth_dev, 1);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		vlan_mask |= ETH_VLAN_FILTER_MASK;
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		vlan_mask |= ETH_VLAN_STRIP_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
 	if (rc)
 		goto error;
@@ -1819,8 +1819,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		/* Retrieve link info from hardware */
 		rc = bnxt_get_hwrm_link_config(bp, &new);
 		if (rc) {
-			new.link_speed = ETH_LINK_SPEED_100M;
-			new.link_duplex = ETH_LINK_FULL_DUPLEX;
+			new.link_speed = RTE_ETH_LINK_SPEED_100M;
+			new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR,
 				"Failed to retrieve link rc = 0x%x!\n", rc);
 			goto out;
@@ -2014,7 +2014,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	if (!vnic->rss_table)
 		return -EINVAL;
 
-	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return -EINVAL;
 
 	if (reta_size != tbl_size) {
@@ -2027,8 +2027,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	for (i = 0; i < reta_size; i++) {
 		struct bnxt_rx_queue *rxq;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (!(reta_conf[idx].mask & (1ULL << sft)))
 			continue;
@@ -2081,8 +2081,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 	}
 
 	for (idx = 0, i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
@@ -2120,7 +2120,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	 * If RSS enablement were different than dev_configure,
 	 * then return -EINVAL
 	 */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
@@ -2138,7 +2138,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
 	vnic->hash_mode =
 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
-					    ETH_RSS_LEVEL(rss_conf->rss_hf));
+					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
 
 	/*
 	 * If hashkey is not specified, use the previously configured
@@ -2183,30 +2183,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 		hash_types = vnic->hash_type;
 		rss_conf->rss_hf = 0;
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_IPV4;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_IPV6;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 		}
@@ -2246,17 +2246,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 		fc_conf->autoneg = 1;
 	switch (bp->link_info->pause) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	}
 	return 0;
@@ -2279,11 +2279,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		bp->link_info->auto_pause = 0;
 		bp->link_info->force_pause = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@@ -2294,7 +2294,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
 		}
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@@ -2305,7 +2305,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
 		}
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@@ -2336,7 +2336,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2351,7 +2351,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
 		bp->vxlan_port_cnt++;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2389,7 +2389,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (!bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2406,7 +2406,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
 		port = bp->vxlan_fw_dst_port_id;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (!bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2584,7 +2584,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 	int rc;
 
 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
-	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		/* Remove any VLAN filters programmed */
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
@@ -2604,7 +2604,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 		bnxt_add_vlan_filter(bp, 0);
 	}
 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
 
 	return 0;
 }
@@ -2617,7 +2617,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 
 	/* Destroy vnic filters and vnic */
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
 	}
@@ -2656,7 +2656,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		rc = bnxt_add_vlan_filter(bp, 0);
 		if (rc)
 			return rc;
@@ -2674,7 +2674,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
 
 	return rc;
 }
@@ -2694,22 +2694,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 	if (!dev->data->dev_started)
 		return 0;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering */
 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
 		else
 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@@ -2724,10 +2724,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 {
 	struct bnxt *bp = dev->data->dev_private;
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
-	if (vlan_type != ETH_VLAN_TYPE_INNER &&
-	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	    vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -2739,7 +2739,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		return -EINVAL;
 	}
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		switch (tpid) {
 		case RTE_ETHER_TYPE_QINQ:
 			bp->outer_tpid_bd =
@@ -2767,7 +2767,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		}
 		bp->outer_tpid_bd |= tpid;
 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer vlan in QinQ\n");
 		return -EINVAL;
@@ -2807,7 +2807,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
 	bnxt_del_dflt_mac_filter(bp, vnic);
 
 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		/* This filter will allow only untagged packets */
 		rc = bnxt_add_vlan_filter(bp, 0);
 	} else {
@@ -3029,10 +3029,10 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 	if (new_mtu > RTE_ETHER_MTU) {
 		bp->flags |= BNXT_FLAG_JUMBO;
 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	} else {
 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags &= ~BNXT_FLAG_JUMBO;
 	}
 
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 59489b591a6f..98e1107f629c 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -974,7 +974,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -1157,7 +1157,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 
 		rxq = bp->rx_queues[act_q->index];
 
-		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+		if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
 			goto use_vnic;
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index f29d57423585..0d9dda0c362c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	uint16_t j = dst_id - 1;
 
 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
-	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+	if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
 	    conf->pool_map[j].pools & (1UL << j)) {
 		PMD_DRV_LOG(DEBUG,
 			"Add vlan %u to vmdq pool %u\n",
@@ -2955,12 +2955,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 {
 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
-	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+	if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
 	switch (conf_link_speed) {
-	case ETH_LINK_SPEED_10M_HD:
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
 	}
@@ -2977,51 +2977,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 {
 	uint16_t eth_link_speed = 0;
 
-	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
-		return ETH_LINK_SPEED_AUTONEG;
+	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+		return RTE_ETH_LINK_SPEED_AUTONEG;
 
-	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_100M:
-	case ETH_LINK_SPEED_100M_HD:
+	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
 		break;
-	case ETH_LINK_SPEED_2_5G:
+	case RTE_ETH_LINK_SPEED_2_5G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
 		break;
-	case ETH_LINK_SPEED_20G:
+	case RTE_ETH_LINK_SPEED_20G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 		break;
@@ -3034,11 +3034,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 	return eth_link_speed;
 }
 
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
-		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
-		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
-		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+		RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+		RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+		RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+		RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
 
 static int bnxt_validate_link_speed(struct bnxt *bp)
 {
@@ -3047,13 +3047,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
 	uint32_t link_speed_capa;
 	uint32_t one_speed;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG)
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
 		return 0;
 
 	link_speed_capa = bnxt_get_speed_capabilities(bp);
 
-	if (link_speed & ETH_LINK_SPEED_FIXED) {
-		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+	if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+		one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
 
 		if (one_speed & (one_speed - 1)) {
 			PMD_DRV_LOG(ERR,
@@ -3083,71 +3083,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 {
 	uint16_t ret = 0;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
 		if (bp->link_info->support_speeds)
 			return bp->link_info->support_speeds;
 		link_speed = BNXT_SUPPORTED_SPEEDS;
 	}
 
-	if (link_speed & ETH_LINK_SPEED_100M)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_100M_HD)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_1G)
+	if (link_speed & RTE_ETH_LINK_SPEED_1G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
-	if (link_speed & ETH_LINK_SPEED_2_5G)
+	if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
-	if (link_speed & ETH_LINK_SPEED_10G)
+	if (link_speed & RTE_ETH_LINK_SPEED_10G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
-	if (link_speed & ETH_LINK_SPEED_20G)
+	if (link_speed & RTE_ETH_LINK_SPEED_20G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
-	if (link_speed & ETH_LINK_SPEED_25G)
+	if (link_speed & RTE_ETH_LINK_SPEED_25G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
-	if (link_speed & ETH_LINK_SPEED_40G)
+	if (link_speed & RTE_ETH_LINK_SPEED_40G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
-	if (link_speed & ETH_LINK_SPEED_50G)
+	if (link_speed & RTE_ETH_LINK_SPEED_50G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
-	if (link_speed & ETH_LINK_SPEED_100G)
+	if (link_speed & RTE_ETH_LINK_SPEED_100G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
-	if (link_speed & ETH_LINK_SPEED_200G)
+	if (link_speed & RTE_ETH_LINK_SPEED_200G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 	return ret;
 }
 
 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 {
-	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+	uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	switch (hw_link_speed) {
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
-		eth_link_speed = ETH_SPEED_NUM_100M;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
-		eth_link_speed = ETH_SPEED_NUM_1G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
-		eth_link_speed = ETH_SPEED_NUM_2_5G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
-		eth_link_speed = ETH_SPEED_NUM_10G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
-		eth_link_speed = ETH_SPEED_NUM_20G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
-		eth_link_speed = ETH_SPEED_NUM_25G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
-		eth_link_speed = ETH_SPEED_NUM_40G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
-		eth_link_speed = ETH_SPEED_NUM_50G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
-		eth_link_speed = ETH_SPEED_NUM_100G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
-		eth_link_speed = ETH_SPEED_NUM_200G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
 	default:
@@ -3160,16 +3160,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 
 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 {
-	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+	uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (hw_link_duplex) {
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
 		/* FALLTHROUGH */
-		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
-		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@@ -3198,12 +3198,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 		link->link_speed =
 			bnxt_parse_hw_link_speed(link_info->link_speed);
 	else
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
 	link->link_status = link_info->link_up;
 	link->link_autoneg = link_info->auto_mode ==
 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
-		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+		RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
 exit:
 	return rc;
 }
@@ -3229,7 +3229,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	if (BNXT_CHIP_P5(bp) &&
-	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+	    dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
 		/* 40G is not supported as part of media auto detect.
 		 * The speed should be forced and autoneg disabled
 		 * to configure 40G speed.
@@ -3320,7 +3320,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
 	HWRM_CHECK_RESULT();
 
-	bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+	bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
 
 	svif_info = rte_le_to_cpu_16(resp->svif_info);
 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index bdbad53b7d7f..a9f5e13476b0 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -536,7 +536,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 957b175f1b89..632a611bf612 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -185,7 +185,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
 	int tpa_info_len = 0;
 
-	if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		int tpa_max = BNXT_TPA_MAX_AGGS(bp);
 
 		tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@@ -278,7 +278,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 				    ag_bitmap_start, ag_bitmap_len);
 
 		/* TPA info */
-		if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 			rx_ring_info->tpa_info =
 				((struct bnxt_tpa_info *)((char *)mz->addr +
 							  tpa_info_start));
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index bbcb3b06e7df..0ac3a2b3b7d3 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -41,13 +41,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 	bp->nr_vnics = 0;
 
 	/* Multi-queue mode */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
 
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* FALLTHROUGH */
 			/* ETH_8/64_POOLs */
 			pools = conf->nb_queue_pools;
@@ -55,14 +55,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 			max_pools = RTE_MIN(bp->max_vnics,
 					    RTE_MIN(bp->max_l2_ctx,
 					    RTE_MIN(bp->max_rsscos_ctx,
-						    ETH_64_POOLS)));
+						    RTE_ETH_64_POOLS)));
 			PMD_DRV_LOG(DEBUG,
 				    "pools = %u max_pools = %u\n",
 				    pools, max_pools);
 			if (pools > max_pools)
 				pools = max_pools;
 			break;
-		case ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_RSS:
 			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
 			break;
 		default:
@@ -100,7 +100,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 				    ring_idx, rxq, i, vnic);
 		}
 		if (i == 0) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
 				bp->eth_dev->data->promiscuous = 1;
 				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 			}
@@ -110,8 +110,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 		vnic->end_grp_id = end_grp_id;
 
 		if (i) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
-			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
 				vnic->rss_dflt_cr = true;
 			goto skip_filter_allocation;
 		}
@@ -136,14 +136,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
 	bp->rx_num_qs_per_vnic = nb_q_per_grp;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
 
 		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
 			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
 
 		for (i = 0; i < bp->nr_vnics; i++) {
-			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
 
 			vnic = &bp->vnic_info[i];
 			vnic->hash_type =
@@ -338,7 +338,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
 	rxq->port_id = eth_dev->data->port_id;
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -454,7 +454,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
 
 		if (BNXT_HAS_RING_GRPS(bp)) {
@@ -525,7 +525,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq->rx_started = false;
 	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (BNXT_HAS_RING_GRPS(bp))
 			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 73fbdd17d126..0909bab89b76 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
 	offloads = dev_conf->rxmode.offloads;
 
-	outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
 
 	/* Initialize ol_flags table. */
 	pt = rxr->ol_flags_table;
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
index d08854ff61e2..e4905b4fd169 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
@@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 9b9489a695a2..0627fd212d0a 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static inline void
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 13211060cf0e..f15e2d3b4ed4 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index 6e563053260a..ffd560166cac 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 47824334ae3e..401dd83f4e7d 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -350,7 +350,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@@ -476,7 +476,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 26253a7e17f2..c63cf4b943fa 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 {
 	uint16_t hwrm_type = 0;
 
-	if (rte_type & ETH_RSS_IPV4)
+	if (rte_type & RTE_ETH_RSS_IPV4)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-	if (rte_type & ETH_RSS_IPV6)
+	if (rte_type & RTE_ETH_RSS_IPV6)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 
 	return hwrm_type;
@@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
 {
 	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
-	bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
-	bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP));
+	bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+	bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP));
 	bool l3_only = l3 && !l4;
 	bool l3_and_l4 = l3 && l4;
 
@@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
 	 * return default hash mode.
 	 */
 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
-		return ETH_RSS_LEVEL_PMD_DEFAULT;
+		return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
 	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
 		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_INNERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
 	else
-		rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+		rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	return rss_level;
 }
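
The hash-level translation above is driven by what applications put in
rss_conf.rss_hf. A minimal sketch with the renamed macros (illustrative
only, not part of this patch):

	#include <rte_ethdev.h>

	/* Request IPv4 and IPv4/TCP RSS hashed over the outermost headers;
	 * the level bit is what the translation above turns into the HWRM
	 * OUTERMOST hash mode. */
	struct rte_eth_rss_conf rss_conf = {
		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_LEVEL_OUTERMOST,
	};
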
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index f71543810970..77ecbef04c3d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 	if (vf >= bp->pdev->max_vfs)
 		return -EINVAL;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
 		PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
 		return -ENOTSUP;
 	}
 
 	/* Is this really the correct mapping?  VFd seems to think it is. */
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		flag |= BNXT_VNIC_INFO_PROMISC;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		flag |= BNXT_VNIC_INFO_BCAST;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
 
 	if (on)
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index fc179a2732ac..8b104b639184 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -167,8 +167,8 @@ struct bond_dev_private {
 	struct rte_eth_desc_lim tx_desc_lim;	/**< Tx descriptor limits */
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[52];				/**< 52-byte hash key buffer. */
 	uint8_t rss_key_len;				/**< hash key length in bytes. */
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 128754f4595a..20adfcf0ea9c 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
 	uint16_t key_speed;
 
 	switch (speed) {
-	case ETH_SPEED_NUM_NONE:
+	case RTE_ETH_SPEED_NUM_NONE:
 		key_speed = 0x00;
 		break;
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		key_speed = BOND_LINK_SPEED_KEY_10M;
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		key_speed = BOND_LINK_SPEED_KEY_100M;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		key_speed = BOND_LINK_SPEED_KEY_1000M;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		key_speed = BOND_LINK_SPEED_KEY_10G;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		key_speed = BOND_LINK_SPEED_KEY_20G;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		key_speed = BOND_LINK_SPEED_KEY_40G;
 		break;
 	default:
@@ -866,7 +866,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
 		if (ret >= 0 && link_info.link_status != 0) {
 			key = link_speed_key(link_info.link_speed) << 1;
-			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+			if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
 				key |= BOND_LINK_FULL_DUPLEX_KEY;
 		} else {
 			key = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index eb8d15d16034..a6fe0304c648 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
 
 	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
 	if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
 		return 0;
 
 	internals = bonded_eth_dev->data->dev_private;
@@ -586,7 +586,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 			return -1;
 		}
 
-		 if (link_props.link_status == ETH_LINK_UP) {
+		if (link_props.link_status == RTE_ETH_LINK_UP) {
 			if (internals->active_slave_count == 0 &&
 			    !internals->user_defined_primary_port)
 				bond_ethdev_primary_set(internals,
@@ -721,7 +721,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
 		internals->tx_offload_capa = 0;
 		internals->rx_queue_offload_capa = 0;
 		internals->tx_queue_offload_capa = 0;
-		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 		internals->reta_size = 0;
 		internals->candidate_max_rx_pktlen = 0;
 		internals->max_rx_pktlen = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index a6755661c49c..a2903366a3f6 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1373,8 +1373,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
 		 * In any other mode the link properties are set to default
 		 * values of AUTONEG/DUPLEX
 		 */
-		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
-		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	}
 }
 
@@ -1704,7 +1704,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	/* If RSS is enabled for bonding, try to enable it for slaves  */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (internals->rss_key_len != 0) {
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
 					internals->rss_key_len;
@@ -1721,23 +1721,23 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	slave_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
 			bonded_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_JUMBO_FRAME)
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_JUMBO_FRAME;
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
 	nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
@@ -1838,7 +1838,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* If RSS is enabled for bonding, synchronize RETA */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int i;
 		struct bond_dev_private *internals;
 
@@ -1961,7 +1961,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 1;
 
 	internals = eth_dev->data->dev_private;
@@ -2101,7 +2101,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 
 	internals->link_status_polling_enabled = 0;
@@ -2423,15 +2423,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 
 	bond_ctx = ethdev->data->dev_private;
 
-	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	if (ethdev->data->dev_started == 0 ||
 			bond_ctx->active_slave_count == 0) {
-		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 
-	ethdev->data->dev_link.link_status = ETH_LINK_UP;
+	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	if (wait_to_complete)
 		link_update = rte_eth_link_get;
@@ -2456,7 +2456,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 					  &slave_link);
 			if (ret < 0) {
 				ethdev->data->dev_link.link_speed =
-					ETH_SPEED_NUM_NONE;
+					RTE_ETH_SPEED_NUM_NONE;
 				RTE_BOND_LOG(ERR,
 					"Slave (port %u) link get failed: %s",
 					bond_ctx->active_slaves[idx],
@@ -2498,7 +2498,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 		 * In these modes the maximum theoretical link speed is the sum
 		 * of all the slaves' speeds.
 		 */
-		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		one_link_update_succeeded = false;
 
 		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2872,7 +2872,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 			goto link_update;
 
 		/* check link state properties if bonded link is up */
-		if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+		if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 			if (link_properties_valid(bonded_eth_dev, &link) != 0)
 				RTE_BOND_LOG(ERR, "Invalid link properties "
 					     "for slave %d in bonding mode %d",
@@ -2888,7 +2888,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
 			bonded_eth_dev->data->dev_link.link_status =
-								ETH_LINK_UP;
+								RTE_ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 			lsc_flag = 1;
 
@@ -2980,12 +2980,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < reta_count; i++) {
 		internals->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -3018,8 +3018,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
 
@@ -3279,7 +3279,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	internals->max_rx_pktlen = 0;
 
 	/* Initially allow to choose any offload type */
-	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 
 	memset(&internals->default_rxconf, 0,
 	       sizeof(internals->default_rxconf));
@@ -3508,7 +3508,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	 * set key to the value specified in port RSS configuration.
 	 * Fall back to default RSS key if the key is not specified
 	 */
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
 			internals->rss_key_len =
 				dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
@@ -3523,9 +3523,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
-			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 				internals->reta_conf[i].reta[j] =
-						(i * RTE_RETA_GROUP_SIZE + j) %
+						(i * RTE_ETH_RETA_GROUP_SIZE + j) %
 						dev->data->nb_rx_queues;
 		}
 	}
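
As a worked example of the RETA group indexing used in the update/query
paths above (a sketch, assuming the usual RTE_ETH_RETA_GROUP_SIZE of 64):

	#include <rte_ethdev.h>

	/* RETA entry 130: group 130 / 64 = 2, slot 130 % 64 = 2, so the
	 * entry lives in reta_conf[2].reta[2] and is updated only when
	 * bit 2 of reta_conf[2].mask is set. */
	uint16_t idx = 130 / RTE_ETH_RETA_GROUP_SIZE;	/* 2 */
	uint16_t shift = 130 % RTE_ETH_RETA_GROUP_SIZE;	/* 2 */
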
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 7caec6cf14c8..9a09748673b2 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -15,22 +15,22 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
@@ -69,36 +69,36 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index 69e767ac3dd6..e3b1bd8ad225 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -76,12 +76,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
index 0e1276c60ba2..f63b8fabefd4 100644
--- a/drivers/net/cnxk/cn10k_tx.c
+++ b/drivers/net/cnxk/cn10k_tx.c
@@ -77,11 +77,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 115e678916bb..9ff2d3dc114a 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,22 +15,22 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
@@ -69,36 +69,36 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -277,9 +277,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
@@ -530,17 +530,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
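
The speed_capa bits masked out above form the same bitmask that
rte_eth_dev_info_get() reports to applications; a hedged sketch of the
consumer side (port_id is assumed to be a valid, probed port):

	#include <stdio.h>
	#include <rte_ethdev.h>

	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
	    (dev_info.speed_capa & RTE_ETH_LINK_SPEED_100G))
		printf("port %u supports 100G\n", port_id);
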
diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index 7d9f1bd61f79..08ee28658bce 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -76,12 +76,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index 763f9a14fd79..f35ae8e70438 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -76,11 +76,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 0e3652ed5109..f6b75645bb69 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 
 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	uint32_t speed_capa;
 
 	/* Auto negotiation disabled */
-	speed_capa = ETH_LINK_SPEED_FIXED;
+	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
-		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return speed_capa;
@@ -54,8 +54,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 }
 
@@ -90,7 +90,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct rte_eth_fc_conf fc_conf = {0};
 	int rc;
 
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
 	 * by the AF driver; update that info in the PMD structure.
 	 */
 	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -98,10 +98,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
@@ -122,11 +122,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (roc_model_is_cn96_ax() &&
 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
-	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_cfg.mode =
-				(fc_cfg.mode == RTE_FC_FULL ||
-				fc_cfg.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_cfg.mode == RTE_ETH_FC_FULL ||
+				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -169,7 +169,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
 	 * Maximum three segments can be supported with W8; choose
 	 * NIX_MAXSQESZ_W16 for multi-segment offload.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -361,7 +361,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
 		rc = cnxk_nix_tsc_convert(dev);
 		if (rc) {
 			plt_err("Failed to calculate delta and freq mult");
@@ -434,24 +434,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->ethdev_rss_hf = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -460,34 +460,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -513,7 +513,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
 	uint64_t rss_hf;
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 
@@ -729,8 +729,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
 
 	/* Nothing much to do if offload is not enabled */
 	if (!(dev->tx_offloads &
-	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
 		return 0;
 
 	/* Setup LSO formats in AF. It's a no-op if other ethdev has
@@ -778,13 +778,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
@@ -814,7 +814,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Prepare rx cfg */
 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
 	}
@@ -1191,12 +1191,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled on PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
 	else
 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		rc = rte_mbuf_dyn_rx_timestamp_register
 			(&dev->tstamp.tstamp_dynfield_offset,
 			 &dev->tstamp.rx_tstamp_dynflag);
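
For context, the RTE_ETH_FC_* modes handled above come straight from the
public flow-control API. A minimal application-side sketch
(enable_full_flow_ctrl is a hypothetical helper, port_id an assumed valid
port):

	#include <string.h>
	#include <rte_ethdev.h>

	static void enable_full_flow_ctrl(uint16_t port_id)
	{
		struct rte_eth_fc_conf fc_conf;

		memset(&fc_conf, 0, sizeof(fc_conf));
		if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
			return;
		if (fc_conf.mode != RTE_ETH_FC_FULL) {
			/* RTE_ETH_FC_FULL requests pause in both directions */
			fc_conf.mode = RTE_ETH_FC_FULL;
			(void)rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
		}
	}
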
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 2528b3cdaa0c..53a657f8865d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -54,41 +54,44 @@
 	 CNXK_NIX_TX_NB_SEG_MAX)
 
 #define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
-	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
-	 ETH_RSS_L4_DST_ONLY)
+	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |                   \
+	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 #define CNXK_NIX_RSS_OFFLOAD                                                   \
-	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
-	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
-	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |                 \
+	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL |             \
+	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST |                 \
+	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
 
 #define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
-	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
-	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
-	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
-	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
-	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
-	 DEV_TX_OFFLOAD_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |          \
+	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT |             \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |                 \
+	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |                  \
+	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |        \
+	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS |              \
+	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
-	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
-	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
-	 DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
-	 DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP |                  \
-	 DEV_RX_OFFLOAD_VLAN_STRIP)
+	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |                 \
+	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER |            \
+	 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH | RTE_ETH_RX_OFFLOAD_TIMESTAMP |                  \
+	 RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 
 #define RSS_IPV4_ENABLE                                                        \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE                                                        \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
-	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE                                                     \
-	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS 3
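
The RTE_ETH_RSS_LEVEL_MASK / RTE_ETH_RSS_LEVEL() pair referenced by these
capability masks encodes the hash level inside rss_hf (0 = PMD default,
1 = outermost, 2 = innermost); an illustrative fragment:

	uint64_t rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_LEVEL_INNERMOST;
	uint32_t level = RTE_ETH_RSS_LEVEL(rss_hf);	/* 2, i.e. innermost */
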
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 37720fb0954e..bf0c6d6b4ad8 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -49,11 +49,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
 		val = ROC_NIX_RSS_RETA_SZ_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
 		val = ROC_NIX_RSS_RETA_SZ_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
 		val = ROC_NIX_RSS_RETA_SZ_256;
 	else
 		val = ROC_NIX_RSS_RETA_SZ_64;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index b6cc5286c6d0..0f6817f75d4a 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -81,25 +81,25 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-		{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
-		{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
-		{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
-		{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
-		{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
-		{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
-		{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
-		{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
-		{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-		{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-		{DEV_RX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
-		{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
-		{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+		{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+		{RTE_ETH_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
+		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
 						 "Scalar, Rx Offloads:"
@@ -143,28 +143,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-		{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-		{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
-		{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
-		{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
-		{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
-		{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
-		{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
-		{DEV_TX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
 						 "Scalar, Tx Offloads:"
@@ -204,8 +204,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	enum rte_eth_fc_mode mode_map[] = {
-					   RTE_FC_NONE, RTE_FC_RX_PAUSE,
-					   RTE_FC_TX_PAUSE, RTE_FC_FULL
+					   RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+					   RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
 					  };
 	struct roc_nix *nix = &dev->nix;
 	int mode;
@@ -265,10 +265,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -409,13 +409,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		plt_err("Scatter offload is not enabled for mtu");
 		goto exit;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
 		plt_err("Greater than maximum supported packet length");
 		goto exit;
@@ -443,9 +443,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	frame_size += RTE_ETHER_CRC_LEN;
 
 	if (frame_size > RTE_ETHER_MAX_LEN)
-		dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Update max_rx_pkt_len */
 	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -746,8 +746,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -782,8 +782,8 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
 		goto fail;
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = reta[idx];
 			idx++;
@@ -816,7 +816,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 	if (rss_conf->rss_key)
 		roc_nix_rss_key_set(nix, rss_conf->rss_key);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index 3fdbdba49549..1cff8d56e65b 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		plt_info("Port %d: Link Up - speed %u Mbps - %s",
 			 (int)(eth_dev->data->port_id),
 			 (uint32_t)link->link_speed,
-			 link->link_duplex == ETH_LINK_FULL_DUPLEX
+			 link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 				 ? "full-duplex"
 				 : "half-duplex");
 	else
@@ -66,7 +66,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
 
 	eth_link.link_status = link->status;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	/* Print link info */
@@ -94,17 +94,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		return 0;
 
 	if (roc_nix_is_lbk(&dev->nix)) {
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_100G;
-		link.link_autoneg = ETH_LINK_FIXED;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		rc = roc_nix_mac_link_info_get(&dev->nix, &info);
 		if (rc)
 			return rc;
 		link.link_status = info.status;
 		link.link_speed = info.speed;
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 		if (info.full_duplex)
 			link.link_duplex = info.full_duplex;
 	}
diff --git a/drivers/net/cnxk/cnxk_ptp.c b/drivers/net/cnxk/cnxk_ptp.c
index 449489f599c4..139fea256ccd 100644
--- a/drivers/net/cnxk/cnxk_ptp.c
+++ b/drivers/net/cnxk/cnxk_ptp.c
@@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 	dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	rc = roc_nix_ptp_rx_ena_dis(nix, true);
 	if (!rc) {
@@ -257,7 +257,7 @@ int
 cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+	uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	struct roc_nix *nix = &dev->nix;
 	int rc = 0;
 
diff --git a/drivers/net/cnxk/cnxk_rte_flow.c b/drivers/net/cnxk/cnxk_rte_flow.c
index 32c1b5dee5fa..ecdfee7b11a6 100644
--- a/drivers/net/cnxk/cnxk_rte_flow.c
+++ b/drivers/net/cnxk/cnxk_rte_flow.c
@@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 7c89a028bf16..dee618a0db5f 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -28,32 +28,32 @@
 #define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
 
 #define CXGBE_DEFAULT_RSS_KEY_LEN     40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-				ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
-				ETH_RSS_NONFRAG_IPV6_OTHER | \
-				ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
-				    ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
-				    ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+				RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+				    RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+				    RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
 
 /* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
-			   DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_TX_OFFLOAD_UDP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_TSO | \
-			   DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			   DEV_RX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_RX_OFFLOAD_UDP_CKSUM | \
-			   DEV_RX_OFFLOAD_TCP_CKSUM | \
-			   DEV_RX_OFFLOAD_JUMBO_FRAME | \
-			   DEV_RX_OFFLOAD_SCATTER | \
-			   DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+			   RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+			   RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+			   RTE_ETH_RX_OFFLOAD_SCATTER | \
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 /* Devargs filtermode and filtermask representation */
 enum cxgbe_devargs_filter_mode_flags {
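
These CXGBE_*_OFFLOADS masks are what the PMD advertises through dev_info;
an application would normally intersect them with the offloads it wants
before configuring the port. A hedged sketch (port_id and port_conf are
assumptions, not cxgbe code):

	#include <rte_ethdev.h>

	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };

	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
	    (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
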
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 177eca397600..78c1381fdb47 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 	}
 
 	new_link.link_status = cxgbe_force_linkup(adapter) ?
-			       ETH_LINK_UP : pi->link_cfg.link_ok;
+			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
 	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
@@ -316,10 +316,10 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	/* set to jumbo mode if needed */
 	if (new_mtu > CXGBE_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
 			    -1, -1, true);
@@ -396,7 +396,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
 			goto out;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 	else
 		eth_dev->data->scattered_rx = 0;
@@ -460,9 +460,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
@@ -685,10 +685,10 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	/* Set to jumbo mode if necessary */
 	if (pkt_len > CXGBE_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
 			       &rxq->fl, NULL,
@@ -1079,13 +1079,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		rx_pause = 1;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1098,12 +1098,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	u8 tx_pause = 0, rx_pause = 0;
 	int ret;
 
-	if (fc_conf->mode == RTE_FC_FULL) {
+	if (fc_conf->mode == RTE_ETH_FC_FULL) {
 		tx_pause = 1;
 		rx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
 		tx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
 		rx_pause = 1;
 	}
 
@@ -1199,9 +1199,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	}
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@@ -1245,8 +1245,8 @@ static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1276,8 +1276,8 @@ static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1478,7 +1478,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_100G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
 		}
@@ -1487,7 +1487,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_50G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
 		}
@@ -1496,7 +1496,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_25G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
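
The RETA hunks above only swap RTE_RETA_GROUP_SIZE for
RTE_ETH_RETA_GROUP_SIZE; the indirection-table layout is unchanged. For
context, a minimal sketch of the same idx/shift arithmetic seen from the
application side (spread_reta() is a hypothetical helper, not part of
this patch):

#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info info;
	uint16_t i, idx, shift;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	if (info.reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -EINVAL;	/* sketch assumes <= 512 entries */

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < info.reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* 64-entry group  */
		shift = i % RTE_ETH_RETA_GROUP_SIZE; /* slot in group   */
		reta[idx].mask |= 1ULL << shift;
		reta[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
}
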
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 6dd1bf1f836e..54723edc2144 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1671,7 +1671,7 @@ int cxgbe_link_start(struct port_info *pi)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
-			    !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+			    !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
 			    true);
 	if (ret == 0) {
 		ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@@ -1695,7 +1695,7 @@ int cxgbe_link_start(struct port_info *pi)
 	}
 
 	if (ret == 0 && cxgbe_force_linkup(adapter))
-		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return ret;
 }
 
@@ -1726,10 +1726,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
 	if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
 			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
@@ -1866,7 +1866,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
 {
 #define SET_SPEED(__speed_name) \
 	do { \
-		*speed_caps |= ETH_LINK_ ## __speed_name; \
+		*speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
 	} while (0)
 
 #define FW_CAPS_TO_SPEED(__fw_name) \
@@ -1953,7 +1953,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
 			      speed_caps);
 
 	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
-		*speed_caps |= ETH_LINK_SPEED_FIXED;
+		*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
 }
 
 /**
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index e5f7721dc4b3..eddb818c4861 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -366,7 +366,7 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
 	int ret, i;
 	struct rte_pktmbuf_pool_private *mbp_priv;
 	u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_JUMBO_FRAME;
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
 	mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
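
The jumbo-frame flag checked above is normally set by the application
before rte_eth_dev_configure(); a minimal sketch with hypothetical
values (port_conf_jumbo is not part of this patch):

#include <rte_ethdev.h>

static const struct rte_eth_conf port_conf_jumbo = {
	.rxmode = {
		.max_rx_pkt_len = 9000,	/* example jumbo frame size */
		.offloads = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
			    RTE_ETH_RX_OFFLOAD_SCATTER,
	},
};
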
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 27d670f843d2..c466256137a3 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -54,30 +54,30 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -189,10 +189,10 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > DPAA_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 		tx_offloads, dev_tx_offloads_nodis);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		uint32_t max_len;
 
 		DPAA_PMD_DEBUG("enabling jumbo");
@@ -259,7 +259,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		DPAA_PMD_DEBUG("enabling scatter mode");
 		fman_if_set_sg(dev->process_private, 1);
 		dev->data->scattered_rx = 1;
@@ -304,43 +304,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	/* Configure link only if link is UP*/
 	if (link->link_status) {
-		if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 			/* Start autoneg only if link is not in autoneg mode */
 			if (!link->link_autoneg)
 				dpaa_restart_link_autoneg(__fif->node_name);
-		} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-			switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
-			case ETH_LINK_SPEED_10M_HD:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+			case RTE_ETH_LINK_SPEED_10M_HD:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10M:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10M:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M_HD:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M_HD:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_1G:
-				speed = ETH_SPEED_NUM_1G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_1G:
+				speed = RTE_ETH_SPEED_NUM_1G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_2_5G:
-				speed = ETH_SPEED_NUM_2_5G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_2_5G:
+				speed = RTE_ETH_SPEED_NUM_2_5G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10G:
-				speed = ETH_SPEED_NUM_10G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10G:
+				speed = RTE_ETH_SPEED_NUM_10G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			default:
-				speed = ETH_SPEED_NUM_NONE;
-				duplex = ETH_LINK_FULL_DUPLEX;
+				speed = RTE_ETH_SPEED_NUM_NONE;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			}
 			/* Set link speed */
@@ -556,30 +556,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G
-					| ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G
+					| RTE_ETH_LINK_SPEED_10G;
 	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, fif->mac_type);
@@ -612,13 +612,13 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+			{RTE_ETH_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 
 	/* Update Rx offload info */
@@ -645,14 +645,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -686,7 +686,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 			ret = dpaa_get_link_status(__fif->node_name, link);
 			if (ret)
 				return ret;
-			if (link->link_status == ETH_LINK_DOWN &&
+			if (link->link_status == RTE_ETH_LINK_DOWN &&
 			    wait_to_complete)
 				rte_delay_ms(CHECK_INTERVAL);
 			else
@@ -697,15 +697,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	}
 
 	if (ioctl_version < 2) {
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
-		link->link_autoneg = ETH_LINK_AUTONEG;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 		if (fif->mac_type == fman_mac_1g)
-			link->link_speed = ETH_SPEED_NUM_1G;
+			link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		else if (fif->mac_type == fman_mac_2_5g)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else if (fif->mac_type == fman_mac_10g)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
 			DPAA_PMD_ERR("invalid link_speed: %s, %d",
 				     dpaa_intf->name, fif->mac_type);
@@ -981,7 +981,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
 		;
 	} else if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SCATTER) {
+			RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
 			buffsz * DPAA_SGT_MAX_ENTRIES) {
 			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
@@ -1303,7 +1303,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
 	else
 		return dpaa_eth_dev_stop(dev);
 	return 0;
@@ -1319,7 +1319,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
 	else
 		dpaa_eth_dev_start(dev);
 	return 0;
@@ -1349,10 +1349,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == RTE_FC_NONE) {
+	if (fc_conf->mode == RTE_ETH_FC_NONE) {
 		return 0;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-		 fc_conf->mode == RTE_FC_FULL) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+		 fc_conf->mode == RTE_ETH_FC_FULL) {
 		fman_if_set_fc_threshold(dev->process_private,
 					 fc_conf->high_water,
 					 fc_conf->low_water,
@@ -1396,11 +1396,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 	}
 	ret = fman_if_get_fc_threshold(dev->process_private);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time =
 			fman_if_get_fc_quanta(dev->process_private);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1663,10 +1663,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
 	fc_conf = dpaa_intf->fc_conf;
 	ret = fman_if_get_fc_threshold(fman_intf);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
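
The flow-control conversions in this file are one-to-one: the RTE_FC_*
and RTE_ETH_FC_* names carry the same values. For context, a minimal
sketch of driving these enums from an application through the existing
rte_eth_dev_flow_ctrl_get()/rte_eth_dev_flow_ctrl_set() calls
(enable_full_fc() is a hypothetical helper, not part of this patch):

#include <string.h>
#include <rte_ethdev.h>

static int
enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	/* Honour PAUSE frames on Rx and generate them on Tx. */
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
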
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b5728e09c29f..c868e9d5bd9b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -74,11 +74,11 @@
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
 #define DPAA_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP)
 
 #define DPAA_TX_CKSUM_OFFLOAD_MASK (             \
 		PKT_TX_IP_CKSUM |                \
diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
index c5b5ec869519..1ccd03602790 100644
--- a/drivers/net/dpaa/dpaa_flow.c
+++ b/drivers/net/dpaa/dpaa_flow.c
@@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1U << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_ETH;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
 
 				if (ipv4_configured)
 					break;
@@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV4;
 				break;
 
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (ipv6_configured)
 					break;
@@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV6;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
 
 				if (tcp_configured)
 					break;
@@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_TCP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (udp_configured)
 					break;
@@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_UDP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 641e7027f12e..7c92b2a42e3f 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -216,7 +216,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1ULL << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -233,7 +233,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_MPLS:
+			case RTE_ETH_RSS_MPLS:
 
 				if (mpls_configured)
 					break;
@@ -270,13 +270,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (l3_configured)
 					break;
@@ -314,12 +314,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_TCP_EX:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (l4_configured)
 					break;
@@ -346,8 +346,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c12169578e22..23bb985b95e9 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -38,34 +38,34 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_CHECKSUM |
-		DEV_RX_OFFLOAD_SCTP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_TIMESTAMP;
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_RSS_HASH |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -143,7 +143,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN Filter not avaialble */
 		if (!priv->max_vlan_filters) {
 			DPAA2_PMD_INFO("VLAN filter not available");
@@ -151,7 +151,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 
 		if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
 		else
@@ -252,13 +252,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 					dev_rx_offloads_nodis;
 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
 					dev_tx_offloads_nodis;
-	dev_info->speed_capa = ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -271,10 +271,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
 	if (dpaa2_svr_family == SVR_LX2160A) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return 0;
@@ -292,16 +292,16 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+			{RTE_ETH_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
 	};
 
 	/* Update Rx offload info */
@@ -328,15 +328,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -559,7 +559,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		tx_offloads, dev_tx_offloads_nodis);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
 				priv->token, eth_conf->rxmode.max_rx_pkt_len
@@ -578,7 +578,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
 			ret = dpaa2_setup_flow_dist(dev,
 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -592,12 +592,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rx_l3_csum_offload = true;
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
 		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -615,7 +615,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
 	{
 		ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -628,12 +628,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		dpaa2_enable_ts[dev->data->port_id] = true;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		tx_l3_csum_offload = true;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
 		tx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -665,8 +665,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
 	dpaa2_tm_init(dev);
 
@@ -1477,10 +1477,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > DPAA2_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -1881,7 +1881,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
 			return -1;
 		}
-		if (state.up == ETH_LINK_DOWN &&
+		if (state.up == RTE_ETH_LINK_DOWN &&
 		    wait_to_complete)
 			rte_delay_ms(CHECK_INTERVAL);
 		else
@@ -1893,9 +1893,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 	link.link_speed = state.rate;
 
 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
@@ -2056,9 +2056,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	No TX side flow control (send Pause frame disabled)
 		 */
 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_RX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	} else {
 		/* DPNI_LINK_OPT_PAUSE not set
 		 *  if ASYM_PAUSE set,
@@ -2068,9 +2068,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	Flow control disabled
 		 */
 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return ret;
@@ -2114,14 +2114,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	/* update cfg with fc_conf */
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		/* Full flow control;
 		 * OPT_PAUSE set, ASYM_PAUSE not set
 		 */
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		/* Enable RX flow control
 		 * OPT_PAUSE not set;
 		 * ASYM_PAUSE set;
@@ -2129,7 +2129,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		/* Enable TX Flow control
 		 * OPT_PAUSE set
 		 * ASYM_PAUSE set
@@ -2137,7 +2137,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		/* Disable Flow control
 		 * OPT_PAUSE not set
 		 * ASYM_PAUSE not set
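
The RSS conversions above keep the dpaa2 hash-distribution logic intact;
only the flag names change. For context, a minimal sketch of the
application-side configuration these flags come from (port_conf here is
a hypothetical value, not part of this patch):

#include <rte_ethdev.h>

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* let the PMD pick a key */
			.rss_hf = RTE_ETH_RSS_IP |
				  RTE_ETH_RSS_UDP |
				  RTE_ETH_RSS_TCP,
		},
	},
};

/* later: rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */
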
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index b9c729f6cdc0..ca75a2175524 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -65,12 +65,12 @@
 #define DPAA2_TX_CONF_ENABLE	0x08
 
 #define DPAA2_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP | \
-	ETH_RSS_MPLS)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP | \
+	RTE_ETH_RSS_MPLS)
 
 /* LX2 FRC Parsed values (Little Endian) */
 #define DPAA2_PKT_TYPE_ETHER		0x0060
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index f40369e2c3f9..7c77243b5d1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -773,7 +773,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP)
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
@@ -987,7 +987,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 							eth_data->port_id);
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP) {
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			rte_vlan_strip(bufs[num_rx]);
 		}
 
@@ -1230,7 +1230,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					if (unlikely(((*bufs)->ol_flags
 						& PKT_TX_VLAN_PKT) ||
 						(eth_data->dev_conf.txmode.offloads
-						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -1273,7 +1273,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
 				(eth_data->dev_conf.txmode.offloads
-				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 				int ret = rte_vlan_insert(bufs);
 				if (ret)
 					goto send_n_return;
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 3b4d9c3ee6f4..ca488fea966f 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -81,15 +81,15 @@
 #define E1000_FTQF_QUEUE_ENABLE          0x00000100
 
 #define IGB_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 /*
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a0ca371b0275..6fb205f8577f 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -599,8 +599,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	e1000_clear_hw_cntrs_base_generic(hw);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_em_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -613,39 +613,39 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -1104,9 +1104,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
@@ -1164,17 +1164,17 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -1426,15 +1426,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1603,7 +1603,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
-			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
@@ -1685,13 +1685,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1820,11 +1820,11 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (frame_size > E1000_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
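
The link_speeds handling above keeps the e1000 semantics: an all-zero
(autoneg) mask advertises every speed, while RTE_ETH_LINK_SPEED_FIXED
plus exactly one speed bit pins the link. A minimal sketch of requesting
a fixed 1 Gb/s link from the application side (configure_fixed_1g() is a
hypothetical helper, not part of this patch):

#include <string.h>
#include <rte_ethdev.h>

static int
configure_fixed_1g(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* RTE_ETH_LINK_SPEED_FIXED disables autonegotiation; the
	 * remaining bits must select exactly one speed/duplex. */
	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_1G;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
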
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index dfd8f2fd0074..cf672c32277b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -93,7 +93,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -172,7 +172,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1168,11 +1168,11 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	RTE_SET_USED(dev);
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	return tx_offload_capa;
 }
@@ -1367,15 +1367,15 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 	max_rx_pktlen = em_get_max_pktlen(dev);
 
 	rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP  |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM   |
-		DEV_RX_OFFLOAD_KEEP_CRC    |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
-		rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	return rx_offload_capa;
 }
@@ -1468,7 +1468,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1806,7 +1806,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -1839,7 +1839,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME ||
 				rctl_bsize < RTE_ETHER_MAX_LEN) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1849,7 +1849,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1862,7 +1862,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1874,21 +1874,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	if ((hw->mac.type == e1000_ich9lan ||
 			hw->mac.type == e1000_pch2lan ||
 			hw->mac.type == e1000_ich10lan) &&
-			rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+			rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
 		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
 		E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
 	}
 
 	if (hw->mac.type == e1000_pch2lan) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
 		else
 			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1908,7 +1908,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		rctl |= E1000_RCTL_LPE;
 	else
 		rctl &= ~E1000_RCTL_LPE;
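
The per-port capability reporting above feeds the usual application
pattern of probing rx_offload_capa before requesting an offload; a
minimal sketch under that assumption (enable_keep_crc() is a
hypothetical helper, not part of this patch):

#include <errno.h>
#include <rte_ethdev.h>

static int
enable_keep_crc(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	/* Only request the offload when the PMD advertises it. */
	if (!(info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC))
		return -ENOTSUP;

	conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	return 0;
}
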
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 10ee0f33415a..03509c960326 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1082,21 +1082,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
-	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
-	    tx_mq_mode == ETH_MQ_TX_DCB ||
-	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* Check multi-queue mode.
-		 * To no break software we accept ETH_MQ_RX_NONE as this might
+		 * To not break software we accept RTE_ETH_MQ_RX_NONE as this might
 		 * be used to turn off VLAN filter.
 		 */
 
-		if (rx_mq_mode == ETH_MQ_RX_NONE ||
-		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+		if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+		    rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 		} else {
 			/* Only support one queue on VFs.
@@ -1108,12 +1108,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 		/* TX mode is not used here, so mode might be ignored.*/
-		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(WARNING, "SRIOV is active,"
 					" TX mode %d is not supported. "
 					" Driver will behave as %d mode.",
-					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+					tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
 		}
 
 		/* check valid queue number */
@@ -1126,17 +1126,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 		/* To no break software that set invalid mode, only display
 		 * warning if invalid mode is used.
 		 */
-		if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
-		    rx_mq_mode != ETH_MQ_RX_RSS) {
+		if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 			/* RSS together with VMDq not supported*/
 			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				     rx_mq_mode);
 			return -EINVAL;
 		}
 
-		if (tx_mq_mode != ETH_MQ_TX_NONE &&
-		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+		    tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
 					" Due to txmode is meaningless in this"
 					" driver, just ignore.",
@@ -1155,8 +1155,8 @@ eth_igb_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multipe queue mode checking */
 	ret  = igb_check_mq_mode(dev);
@@ -1296,8 +1296,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/*
 	 * VLAN Offload Settings
 	 */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_igb_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
@@ -1305,7 +1305,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable VLAN filter since VMDq always use VLAN filter */
 		igb_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1319,39 +1319,39 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -2194,21 +2194,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_82576:
 		dev_info->max_rx_queues = 16;
 		dev_info->max_tx_queues = 16;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 16;
 		break;
 
 	case e1000_82580:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
 	case e1000_i350:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
@@ -2234,7 +2234,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		return -EINVAL;
 	}
 	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2260,9 +2260,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2305,12 +2305,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	switch (hw->mac.type) {
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
@@ -2411,17 +2411,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else if (!link_check) {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2597,7 +2597,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
 	/* only outer TPID of double VLAN can be configured*/
-	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg = E1000_READ_REG(hw, E1000_VET);
 		reg = (reg & (~E1000_VET_VET_EXT)) |
 			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
@@ -2686,7 +2686,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 				dev->data->dev_conf.rxmode.max_rx_pkt_len);
 }
@@ -2704,7 +2704,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						VLAN_TAG_SIZE);
@@ -2716,22 +2716,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -2883,7 +2883,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
 				     " Port %d: Link Up - speed %u Mbps - %s",
 				     dev->data->port_id,
 				     (unsigned)link.link_speed,
-				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				     "full-duplex" : "half-duplex");
 		} else {
 			PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3037,13 +3037,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3112,18 +3112,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 * on configuration
 		 */
 		switch (fc_conf->mode) {
-		case RTE_FC_NONE:
+		case RTE_ETH_FC_NONE:
 			ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_RX_PAUSE:
+		case RTE_ETH_FC_RX_PAUSE:
 			ctrl |= E1000_CTRL_RFCE;
 			ctrl &= ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_TX_PAUSE:
+		case RTE_ETH_FC_TX_PAUSE:
 			ctrl |= E1000_CTRL_TFCE;
 			ctrl &= ~E1000_CTRL_RFCE;
 			break;
-		case RTE_FC_FULL:
+		case RTE_ETH_FC_FULL:
 			ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
 			break;
 		default:
@@ -3271,22 +3271,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -3584,16 +3584,16 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
@@ -3625,16 +3625,16 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
@@ -4407,11 +4407,11 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (frame_size > E1000_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
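
For context on the application side of these renames: capability checks keep
exactly the same shape, since the backward-compatibility macros alias the old
DEV_* spellings to the new values until removal. A minimal sketch
(port_keeps_crc() is a made-up helper name, not part of this patch):

#include <rte_ethdev.h>

/* Return 1 if the port can leave the Ethernet FCS on received packets,
 * 0 if not, negative errno if the query fails.
 */
static int
port_keeps_crc(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Same bit value as the old DEV_RX_OFFLOAD_KEEP_CRC. */
	return (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC) != 0;
}
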
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 2ce74dd5a9a5..fe355ef6b3b5 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -88,7 +88,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
-	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+	RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 278d5d2712af..a57dde59dbc0 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -111,7 +111,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /**
@@ -185,7 +185,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1456,13 +1456,13 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	RTE_SET_USED(dev);
-	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
-			  DEV_TX_OFFLOAD_TCP_TSO     |
-			  DEV_TX_OFFLOAD_MULTI_SEGS;
+	tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return tx_offload_capa;
 }
@@ -1635,20 +1635,20 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
-			  DEV_RX_OFFLOAD_VLAN_FILTER |
-			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_RX_OFFLOAD_UDP_CKSUM   |
-			  DEV_RX_OFFLOAD_TCP_CKSUM   |
-			  DEV_RX_OFFLOAD_JUMBO_FRAME |
-			  DEV_RX_OFFLOAD_KEEP_CRC    |
-			  DEV_RX_OFFLOAD_SCATTER     |
-			  DEV_RX_OFFLOAD_RSS_HASH;
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+			  RTE_ETH_RX_OFFLOAD_SCATTER     |
+			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == e1000_i350 ||
 	    hw->mac.type == e1000_i210 ||
 	    hw->mac.type == e1000_i211)
-		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	return rx_offload_capa;
 }
@@ -1729,7 +1729,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1963,23 +1963,23 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
@@ -2045,23 +2045,23 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -2183,15 +2183,15 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
 			E1000_VMOLR_MPME);
 
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 			vmolr |= E1000_VMOLR_AUPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 			vmolr |= E1000_VMOLR_ROMPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 			vmolr |= E1000_VMOLR_ROPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 			vmolr |= E1000_VMOLR_BAM;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 			vmolr |= E1000_VMOLR_MPME;
 
 		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
@@ -2227,9 +2227,9 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	/* VLVF: set up filters for vlan tags as configured */
 	for (i = 0; i < cfg->nb_pool_maps; i++) {
 		/* set vlan id in VF register and set the valid bit */
-		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
-                        (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
-			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+			(cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
+			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
 			E1000_VLVF_POOLSEL_MASK)));
 	}
 
@@ -2281,7 +2281,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t mrqc;
 
-	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+	if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
 		/*
 		 * SRIOV active scheme
 		 * FIXME if support RSS together with VMDq & SRIOV
@@ -2295,14 +2295,14 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-			case ETH_MQ_RX_RSS:
+			case RTE_ETH_MQ_RX_RSS:
 				igb_rss_configure(dev);
 				break;
-			case ETH_MQ_RX_VMDQ_ONLY:
+			case RTE_ETH_MQ_RX_VMDQ_ONLY:
 				/*Configure general VMDQ only RX parameters*/
 				igb_vmdq_rx_hw_configure(dev);
 				break;
-			case ETH_MQ_RX_NONE:
+			case RTE_ETH_MQ_RX_NONE:
 				/* if mq_mode is none, disable rss mode.*/
 			default:
 				igb_rss_disable(dev);
@@ -2342,7 +2342,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 		rctl |= E1000_RCTL_LPE;
@@ -2351,7 +2351,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Set maximum packet length by default, and might be updated
 		 * together with enabling/disabling dual VLAN.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			max_len += VLAN_TAG_SIZE;
 
 		E1000_WRITE_REG(hw, E1000_RLPML, max_len);
@@ -2387,7 +2387,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -2458,7 +2458,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2502,16 +2502,16 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
 	if (rxmode->offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		rxcsum |= E1000_RXCSUM_TUOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_CRCOFL;
@@ -2519,7 +2519,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
 		/* clear STRCRC bit in all queues */
@@ -2559,7 +2559,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/* Make sure VLAN Filters are off. */
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
 		rctl &= ~E1000_RCTL_VFE;
 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
@@ -2758,7 +2758,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
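
The idx/shift arithmetic in the RETA hunks above is the standard way both
sides of the API walk the redirection table: each rte_eth_rss_reta_entry64
covers RTE_ETH_RETA_GROUP_SIZE (64) entries, so the 128-entry table used by
igb needs two of them. A minimal application-side sketch (fill_reta_128() is
a made-up name):

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
fill_reta_128(uint16_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	uint16_t i, idx, shift;

	if (nb_rx_queues == 0)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;
		/* Round-robin the Rx queues across the table. */
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			RTE_ETH_RSS_RETA_SIZE_128);
}
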
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 4cebf60a68a7..4e3ee72608f4 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -116,10 +116,10 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-			DEV_TX_OFFLOAD_UDP_CKSUM |\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
 		       PKT_TX_IP_CKSUM |\
 		       PKT_TX_TCP_SEG)
@@ -310,7 +310,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
 		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
@@ -318,7 +318,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L3 checksum is needed */
 		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -335,12 +335,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L4 checksum is needed */
 		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_UDP_CKSUM) &&
-				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else {
@@ -623,9 +623,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
-	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_NONE;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return 0;
 }
@@ -684,7 +684,7 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
 	uint32_t max_frame_len = adapter->max_mtu;
 
 	if (adapter->edev_data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_JUMBO_FRAME)
+	    RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		max_frame_len =
 			adapter->edev_data->dev_conf.rxmode.max_rx_pkt_len;
 
@@ -915,7 +915,7 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
@@ -1854,9 +1854,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
@@ -1907,36 +1907,36 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
-			ETH_LINK_SPEED_1G   |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_5G   |
-			ETH_LINK_SPEED_10G  |
-			ETH_LINK_SPEED_25G  |
-			ETH_LINK_SPEED_40G  |
-			ETH_LINK_SPEED_50G  |
-			ETH_LINK_SPEED_100G;
+			RTE_ETH_LINK_SPEED_1G   |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_5G   |
+			RTE_ETH_LINK_SPEED_10G  |
+			RTE_ETH_LINK_SPEED_25G  |
+			RTE_ETH_LINK_SPEED_40G  |
+			RTE_ETH_LINK_SPEED_50G  |
+			RTE_ETH_LINK_SPEED_100G;
 
 	/* Set Tx & Rx features available for device */
 	if (adapter->offloads.tso4_supported)
-		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
+		tx_feat	|= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (adapter->offloads.tx_csum_supported)
-		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+		tx_feat |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (adapter->offloads.rx_csum_supported)
-		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM  |
-			DEV_RX_OFFLOAD_TCP_CKSUM;
+		rx_feat |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
-	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	rx_feat |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+	tx_feat |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
 	if (adapter->offloads.rss_hash_supported)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
@@ -2100,7 +2100,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
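
The link fields populated in ena_link_update() above are read back with the
same renamed constants. A minimal sketch of the consumer side (print_link()
is a made-up name):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	printf("port %u: %s, %u Mbps, %s\n", (unsigned int)port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
	       (unsigned int)link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			"full-duplex" : "half-duplex");
}
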
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 06ac8b06b5cb..3b1844e50982 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -54,8 +54,8 @@
 
 #define ENA_HASH_KEY_SIZE		40
 
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define ENA_IO_TXQ_IDX(q)		(2 * (q))
 #define ENA_IO_RXQ_IDX(q)		(2 * (q) + 1)
diff --git a/drivers/net/ena/ena_rss.c b/drivers/net/ena/ena_rss.c
index 88afe13da04d..e7b57659491d 100644
--- a/drivers/net/ena/ena_rss.c
+++ b/drivers/net/ena/ena_rss.c
@@ -76,7 +76,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -93,8 +93,8 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 		/* Each reta_conf is for 64 entries.
 		 * To support 128 we use 2 conf of 64.
 		 */
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
@@ -137,10 +137,10 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	int reta_idx;
 
 	if (reta_size == 0 || reta_conf == NULL ||
-	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
+	    (reta_size > RTE_ETH_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -155,8 +155,8 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
-		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		reta_conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
 			reta_conf[reta_conf_idx].reta[reta_idx] =
 				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
@@ -200,34 +200,34 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Convert proto to ETH flag */
 	switch (proto) {
 	case ENA_ADMIN_RSS_TCP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		break;
 	case ENA_ADMIN_RSS_TCP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 		break;
 	case ENA_ADMIN_RSS_IP4:
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 		break;
 	case ENA_ADMIN_RSS_IP6:
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 		break;
 	case ENA_ADMIN_RSS_IP4_FRAG:
-		rss_hf |= ETH_RSS_FRAG_IPV4;
+		rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
 		break;
 	case ENA_ADMIN_RSS_NOT_IP:
-		rss_hf |= ETH_RSS_L2_PAYLOAD;
+		rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
 		break;
 	case ENA_ADMIN_RSS_TCP6_EX:
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 		break;
 	case ENA_ADMIN_RSS_IP6_EX:
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 		break;
 	default:
 		break;
@@ -236,10 +236,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L3. */
 	switch (fields & ENA_HF_RSS_ALL_L3) {
 	case ENA_ADMIN_RSS_L3_SA:
-		rss_hf |= ETH_RSS_L3_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L3_DA:
-		rss_hf |= ETH_RSS_L3_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
 		break;
 	default:
 		break;
@@ -248,10 +248,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L4. */
 	switch (fields & ENA_HF_RSS_ALL_L4) {
 	case ENA_ADMIN_RSS_L4_SP:
-		rss_hf |= ETH_RSS_L4_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L4_DP:
-		rss_hf |= ETH_RSS_L4_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
 		break;
 	default:
 		break;
@@ -269,11 +269,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
 	/* Determine which fields of L3 should be used. */
-	switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
-	case ETH_RSS_L3_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+	case RTE_ETH_RSS_L3_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_DA;
 		break;
-	case ETH_RSS_L3_SRC_ONLY:
+	case RTE_ETH_RSS_L3_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_SA;
 		break;
 	default:
@@ -285,11 +285,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	}
 
 	/* Determine which fields of L4 should be used. */
-	switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
-	case ETH_RSS_L4_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+	case RTE_ETH_RSS_L4_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_DP;
 		break;
-	case ETH_RSS_L4_SRC_ONLY:
+	case RTE_ETH_RSS_L4_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_SP;
 		break;
 	default:
@@ -335,43 +335,43 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
 	int rc, i;
 
 	/* Turn on appropriate fields for each requested packet type */
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
 
-	if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+	if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
 		selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
 
@@ -542,7 +542,7 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint16_t admin_hf;
 	static bool warn_once;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
 		return -ENOTSUP;
 	}
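
The proto-to-flag mapping above translates between the device's admin hash
fields and the renamed RTE_ETH_RSS_* bits an application passes in. A minimal
request-side sketch matching the fields in ENA_ALL_RSS_HF
(enable_tcp_udp_rss() is a made-up name; key/key_len come from the caller):

#include <rte_ethdev.h>

static int
enable_tcp_udp_rss(uint16_t port_id, uint8_t *key, uint8_t key_len)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,		/* NULL keeps the current key */
		.rss_key_len = key_len,
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
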
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index b496cd470045..e0fb44edeb41 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -100,27 +100,27 @@ enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
 
 	if (status & ENETC_LINK_MODE)
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 
 	if (status & ENETC_LINK_STATUS)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	switch (status & ENETC_LINK_SPEED_MASK) {
 	case ENETC_LINK_SPEED_1G:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case ENETC_LINK_SPEED_100M:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	default:
 	case ENETC_LINK_SPEED_10M:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -207,11 +207,11 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	dev_info->max_tx_queues = MAX_TX_RINGS;
 	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
 	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		 DEV_RX_OFFLOAD_UDP_CKSUM |
-		 DEV_RX_OFFLOAD_TCP_CKSUM |
-		 DEV_RX_OFFLOAD_KEEP_CRC |
-		 DEV_RX_OFFLOAD_JUMBO_FRAME);
+		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
 
 	return 0;
 }
@@ -462,7 +462,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
 			       RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
-	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				     RTE_ETHER_CRC_LEN : 0);
 
 	return 0;
@@ -679,10 +679,10 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > ENETC_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads &=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
 	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
@@ -708,7 +708,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		uint32_t max_len;
 
 		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -723,7 +723,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 			RTE_ETHER_CRC_LEN;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		int config;
 
 		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
@@ -731,10 +731,10 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		checksum &= ~L3_CKSUM;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		checksum &= ~L4_CKSUM;
 
 	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
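
On the configuration side, the renamed offload and mq_mode macros slot into
struct rte_eth_conf unchanged. A minimal sketch written against the new names
only (the offload/RSS values are chosen purely for illustration):

#include <rte_ethdev.h>

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
			    RTE_ETH_RX_OFFLOAD_KEEP_CRC,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_hf = RTE_ETH_RSS_IP,
		},
	},
};
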
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 47bfdac2cfdd..d5493c98345d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -178,7 +178,7 @@ struct enic {
 	 */
 	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
 	uint8_t rss_enable;
-	uint64_t rss_hf; /* ETH_RSS flags */
+	uint64_t rss_hf; /* RTE_ETH_RSS flags */
 	union vnic_rss_key rss_key;
 	union vnic_rss_cpu rss_cpu;
 
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 8d5797523b8f..30cd1d4f5dd1 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,30 +38,30 @@ static const struct vic_speed_capa {
 	uint16_t sub_devid;
 	uint32_t capa;
 } vic_speed_capa_map[] = {
-	{ 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-	{ 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-	{ 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-	{ 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-	{ 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-	{ 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-	{ 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-	{ 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-	{ 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-	{ 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-	{ 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-	{ 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-	{ 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-	{ 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-	{ 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-	{ 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-	{ 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-	{ 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-	{ 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-	{ 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-	{ 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-	{ 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
 	{ 0, 0 }, /* End marker */
 };
 
@@ -293,8 +293,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	ENICPMD_FUNC_TRACE();
 
 	offloads = eth_dev->data->dev_conf.rxmode.offloads;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			enic->ig_vlan_strip_en = 1;
 		else
 			enic->ig_vlan_strip_en = 0;
@@ -319,17 +319,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	enic->mc_count = 0;
 	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-				  DEV_RX_OFFLOAD_CHECKSUM);
+				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* All vlan offload masks to apply the current settings */
-	mask = ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = enicpmd_vlan_offload_set(eth_dev, mask);
 	if (ret) {
 		dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -431,14 +431,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
 	}
 	/* 1300 and later models are at least 40G */
 	if (id >= 0x0100)
-		return ETH_LINK_SPEED_40G;
+		return RTE_ETH_LINK_SPEED_40G;
 	/* VFs have subsystem id 0, check device id */
 	if (id == 0) {
 		/* Newer VF implies at least 40G model */
 		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-			return ETH_LINK_SPEED_40G;
+			return RTE_ETH_LINK_SPEED_40G;
 	}
-	return ETH_LINK_SPEED_10G;
+	return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -770,8 +770,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
 				enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -802,8 +802,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
 	 */
 	rss_cpu = enic->rss_cpu;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			rss_cpu.cpu[i / 4].b[i % 4] =
 				enic_rte_rq_idx_to_sop_idx(
@@ -879,7 +879,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 	 */
 	conf->offloads = enic->rx_offload_capa;
 	if (!enic->ig_vlan_strip_en)
-		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -965,8 +965,8 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
 				   struct rte_eth_udp_tunnel *tnl)
 {
-	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
-	    tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
 		return -ENOTSUP;
 	if (!enic->overlay_offload) {
 		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
@@ -1006,7 +1006,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
@@ -1035,7 +1035,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
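
speed_capa, as built from vic_speed_capa_map above, is a bitmask of
RTE_ETH_LINK_SPEED_* flags rather than a single speed value. A minimal check
(supports_25g() is a made-up name):

#include <stdbool.h>
#include <rte_ethdev.h>

static bool
supports_25g(const struct rte_eth_dev_info *dev_info)
{
	return (dev_info->speed_capa & RTE_ETH_LINK_SPEED_25G) != 0;
}
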
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 2affd380c6a4..754cf362c6d8 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
 
 	memset(&link, 0, sizeof(link));
 	link.link_status = enic_get_link_status(enic);
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = vnic_dev_port_speed(enic->vdev);
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
 	}
 
 	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* vnic notification of link status has already been turned on in
 	 * enic_dev_init() which is called during probe time.  Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
 	 * and vlan insertion are supported.
 	 */
 	simple_tx_offloads = enic->tx_offload_capa &
-		(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_VLAN_INSERT |
-		 DEV_TX_OFFLOAD_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_UDP_CKSUM |
-		 DEV_TX_OFFLOAD_TCP_CKSUM);
+		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if ((eth_dev->data->dev_conf.txmode.offloads &
 	     ~simple_tx_offloads) == 0) {
 		ENICPMD_LOG(DEBUG, " use the simple tx handler");
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_SCATTER) {
+	    RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((max pkt len)/mbuf_size) */
 		mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
@@ -1386,15 +1386,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 	rss_hash_type = 0;
 	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
 	if (enic->rq_count > 1 &&
-	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+	    (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
 	    rss_hf != 0) {
 		rss_enable = 1;
-		if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			      ETH_RSS_NONFRAG_IPV4_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
 			if (enic->udp_rss_weak) {
 				/*
@@ -1405,12 +1405,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
 			}
 		}
-		if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
-			      ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+			      RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
 			if (enic->udp_rss_weak)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -1751,9 +1751,9 @@ enic_enable_overlay_offload(struct enic *enic)
 		return -EINVAL;
 	}
 	enic->tx_offload_capa |=
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		(enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
-		(enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+		(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
 	enic->tx_offload_mask |=
 		PKT_TX_OUTER_IPV6 |
 		PKT_TX_OUTER_IPV4 |
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index a8f5332a407f..12f734260ca5 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -147,31 +147,31 @@ int enic_get_vnic_config(struct enic *enic)
 		 * IPV4 hash type handles both non-frag and frag packet types.
 		 * TCP/UDP is controlled via a separate flag below.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
-			ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+			RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (ENIC_SETTING(enic, RSSHASH_IPV6))
 		/*
 		 * The VIC adapter can perform RSS on IPv6 packets with and
 		 * without extension headers. An IPv6 "fragment" is an IPv6
 		 * packet with the fragment extension header.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
-			ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+			RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
-			ETH_RSS_IPV6_TCP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_IPV6_TCP_EX;
 	if (enic->udp_rss_weak)
 		enic->flow_type_rss_offloads |=
-			ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 
 	/* Zero offloads if RSS is not enabled */
 	if (!ENIC_SETTING(enic, RSS))
@@ -201,20 +201,20 @@ int enic_get_vnic_config(struct enic *enic)
 	enic->tx_queue_offload_capa = 0;
 	enic->tx_offload_capa =
 		enic->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	enic->rx_offload_capa =
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	enic->tx_offload_mask =
 		PKT_TX_IPV6 |
 		PKT_TX_IPV4 |
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index 8216063a3d8b..9b22a6ce8941 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -17,10 +17,10 @@
 
 const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
 static const struct rte_eth_link eth_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_UP,
-	.link_autoneg = ETH_LINK_AUTONEG,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_UP,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG,
 };
 
 static int
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 602c04033c18..5f4810051dac 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -326,7 +326,7 @@ int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
 	int qid;
 	struct rte_eth_dev *fsdev;
 	struct rxq **rxq;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 				&ETH(sdev)->data->dev_conf.intr_conf;
 
 	fsdev = fs_dev(sdev);
@@ -519,7 +519,7 @@ int
 failsafe_rx_intr_install(struct rte_eth_dev *dev)
 {
 	struct fs_priv *priv = PRIV(dev);
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 			&priv->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 5ff33e03e034..8cb215651df8 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1182,53 +1182,53 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
 	 * configuring a sub-device.
 	 */
 	infos->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->rx_queue_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	infos->flow_type_rss_offloads =
-		ETH_RSS_IP |
-		ETH_RSS_UDP |
-		ETH_RSS_TCP;
+		RTE_ETH_RSS_IP |
+		RTE_ETH_RSS_UDP |
+		RTE_ETH_RSS_TCP;
 	infos->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
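
The struct rename above (rte_intr_conf to rte_eth_intr_conf) does not change
how the interrupt configuration is populated, since designated initializers
name only the fields. A minimal sketch enabling per-queue Rx interrupts
(illustrative values only):

#include <rte_ethdev.h>

static const struct rte_eth_conf intr_port_conf = {
	.intr_conf = {
		.rxq = 1,	/* enable Rx queue interrupts */
	},
};
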
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 916b856acc4b..7af115399e0f 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -177,7 +177,7 @@ struct fm10k_rx_queue {
 	uint8_t drop_en;
 	uint8_t rx_deferred_start; /* don't start this queue in dev start. */
 	uint16_t rx_ftag_en; /* indicates FTAG RX supported */
-	uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /*
@@ -209,7 +209,7 @@ struct fm10k_tx_queue {
 	uint16_t next_rs; /* Next pos to set RS flag */
 	uint16_t next_dd; /* Next pos to check DD flag */
 	volatile uint32_t *tail_ptr;
-	uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
 	uint16_t nb_desc;
 	uint16_t port_id;
 	uint8_t tx_deferred_start; /** don't start this queue in dev start. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 3236290e4021..e77cfa3f9882 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -413,12 +413,12 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
 
 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 
-	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		return 0;
 
 	if (hw->mac.type == fm10k_mac_vf) {
@@ -449,8 +449,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
@@ -510,7 +510,7 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};
 
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
 		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
@@ -547,15 +547,15 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 	 */
 	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	if (mrqc == 0) {
 		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
@@ -602,7 +602,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	if (hw->mac.type != fm10k_mac_pf)
 		return;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		nb_queue_pools = vmdq_conf->nb_queue_pools;
 
 	/* no pool number change, no need to update logic port and VLAN/MAC */
@@ -759,7 +759,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+			rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1145,7 +1145,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Update default vlan when not in VMDQ mode */
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
 	fm10k_link_update(dev, 0);
@@ -1222,11 +1222,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();
 
-	dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_speed  = RTE_ETH_SPEED_NUM_50G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	dev->data->dev_link.link_status =
-		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+		dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return 0;
 }
@@ -1378,7 +1378,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_vfs            = pdev->max_vfs;
 	dev_info->vmdq_pool_base     = 0;
 	dev_info->vmdq_queue_base    = 0;
-	dev_info->max_vmdq_pools     = ETH_32_POOLS;
+	dev_info->max_vmdq_pools     = RTE_ETH_32_POOLS;
 	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
 	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
@@ -1389,15 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
-	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-					ETH_RSS_IPV6 |
-					ETH_RSS_IPV6_EX |
-					ETH_RSS_NONFRAG_IPV4_TCP |
-					ETH_RSS_NONFRAG_IPV6_TCP |
-					ETH_RSS_IPV6_TCP_EX |
-					ETH_RSS_NONFRAG_IPV4_UDP |
-					ETH_RSS_NONFRAG_IPV6_UDP |
-					ETH_RSS_IPV6_UDP_EX;
+	dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+					RTE_ETH_RSS_IPV6 |
+					RTE_ETH_RSS_IPV6_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+					RTE_ETH_RSS_IPV6_TCP_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+					RTE_ETH_RSS_IPV6_UDP_EX;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1435,9 +1435,9 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+			RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1509,7 +1509,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		return -EINVAL;
 	}
 
-	if (vlan_id > ETH_VLAN_ID_MAX) {
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
 		return -EINVAL;
 	}
@@ -1767,21 +1767,21 @@ static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+	return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
 }
 
 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
-			   DEV_RX_OFFLOAD_VLAN_FILTER |
-			   DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			   DEV_RX_OFFLOAD_UDP_CKSUM   |
-			   DEV_RX_OFFLOAD_TCP_CKSUM   |
-			   DEV_RX_OFFLOAD_JUMBO_FRAME |
-			   DEV_RX_OFFLOAD_HEADER_SPLIT |
-			   DEV_RX_OFFLOAD_RSS_HASH);
+	return  (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH);
 }
 
 static int
@@ -1966,12 +1966,12 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_MULTI_SEGS  |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_TSO);
+	return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO);
 }
 
 static int
@@ -2112,8 +2112,8 @@ fm10k_reta_update(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2161,8 +2161,8 @@ fm10k_reta_query(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2199,15 +2199,15 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	/* If the mapping doesn't match any supported hash type, return */
 	if (mrqc == 0)
@@ -2244,15 +2244,15 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
 	hf = 0;
-	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV4)     ? RTE_ETH_RSS_IPV4              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6_EX           : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX       : 0;
 
 	rss_conf->rss_hf = hf;
 
@@ -2607,7 +2607,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 
 			/* first clear the internal SW recording structure */
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					false);
 
@@ -2623,7 +2623,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 					MAIN_VSI_POOL_NUMBER);
 
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					true);
 
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 83af01dc2da6..50973a662c67 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -208,11 +208,11 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 	/* without rx ol_flags, no VP flag report */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 #endif
 
@@ -221,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c
index cb9cf6efa287..80f9eb5c3031 100644
--- a/drivers/net/hinic/base/hinic_pmd_hwdev.c
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -1320,28 +1320,28 @@ hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
 static int hinic_link_event_process(struct hinic_hwdev *hwdev,
 				    struct rte_eth_dev *eth_dev, u8 status)
 {
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 	struct nic_port_info port_info;
 	struct rte_eth_link link;
 	int rc = HINIC_OK;
 
 	if (!status) {
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
 		memset(&port_info, 0, sizeof(port_info));
 		rc = hinic_get_port_info(hwdev, &port_info);
 		if (rc) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
-			link.link_autoneg = ETH_LINK_FIXED;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+			link.link_autoneg = RTE_ETH_LINK_FIXED;
 		} else {
 			link.link_speed = port_speed[port_info.speed %
 						LINK_SPEED_MAX];
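
For comparison with the driver-side link reporting above, a minimal sketch of
how an application reads the same fields back with the renamed constants
(printf formatting is illustrative only):

#include <stdio.h>
#include <rte_ethdev.h>

/* Print the current link state using the RTE_ETH_LINK_* names. */
static void
print_link_status(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full" : "half");
	else
		printf("port %u: down\n", port_id);
}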
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index 1a7240154668..105c0f48a616 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -311,8 +311,8 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* mtu size is 256~9600 */
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
@@ -338,7 +338,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 
 	/* init vlan offload */
 	err = hinic_vlan_offload_set(dev,
-				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+				RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
 		(void)hinic_config_mq_mode(dev, FALSE);
@@ -696,15 +696,15 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
 	} else {
 		*speed_capa = 0;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
-			*speed_capa |= ETH_LINK_SPEED_1G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_1G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
-			*speed_capa |= ETH_LINK_SPEED_10G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_10G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
-			*speed_capa |= ETH_LINK_SPEED_25G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_25G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
-			*speed_capa |= ETH_LINK_SPEED_40G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_40G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
-			*speed_capa |= ETH_LINK_SPEED_100G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	}
 }
 
@@ -732,25 +732,25 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 
 	hinic_get_speed_capa(dev, &info->speed_capa);
 	info->rx_queue_offload_capa = 0;
-	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_VLAN_FILTER |
-				DEV_RX_OFFLOAD_SCATTER |
-				DEV_RX_OFFLOAD_JUMBO_FRAME |
-				DEV_RX_OFFLOAD_TCP_LRO |
-				DEV_RX_OFFLOAD_RSS_HASH;
+	info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				RTE_ETH_RX_OFFLOAD_SCATTER |
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	info->tx_queue_offload_capa = 0;
-	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_UDP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_CKSUM |
-				DEV_TX_OFFLOAD_SCTP_CKSUM |
-				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_TCP_TSO |
-				DEV_TX_OFFLOAD_MULTI_SEGS;
+	info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
 	info->reta_size = HINIC_RSS_INDIR_SIZE;
@@ -847,20 +847,20 @@ static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
 	u8 port_link_status = 0;
 	struct nic_port_info port_link_info;
 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 
 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
 	if (rc)
 		return rc;
 
 	if (!port_link_status) {
-		link->link_status = ETH_LINK_DOWN;
+		link->link_status = RTE_ETH_LINK_DOWN;
 		link->link_speed = 0;
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
-		link->link_autoneg = ETH_LINK_FIXED;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_FIXED;
 		return HINIC_OK;
 	}
 
@@ -902,8 +902,8 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* Get link status information from hardware */
 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
 		if (rc != HINIC_OK) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Get link status failed");
 			goto out;
 		}
@@ -1552,10 +1552,10 @@ static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	frame_size = HINIC_MTU_TO_PKTLEN(mtu);
 	if (frame_size > HINIC_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 	nic_dev->mtu_size = mtu;
@@ -1664,8 +1664,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	int err;
 
 	/* Enable or disable VLAN filter */
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
 			TRUE : FALSE;
 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
@@ -1686,8 +1686,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Enable or disable VLAN stripping */
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
 			TRUE : FALSE;
 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
 		if (err) {
@@ -1873,13 +1873,13 @@ static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
 	fc_conf->autoneg = nic_pause.auto_neg;
 
 	if (nic_pause.tx_pause && nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (nic_pause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else if (nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1893,14 +1893,14 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	nic_pause.auto_neg = fc_conf->autoneg;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		nic_pause.tx_pause = true;
 	else
 		nic_pause.tx_pause = false;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		nic_pause.rx_pause = true;
 	else
 		nic_pause.rx_pause = false;
@@ -1944,7 +1944,7 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err = 0;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_OK;
 	}
@@ -1965,14 +1965,14 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 		}
 	}
 
-	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 
 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
 	if (err) {
@@ -2008,7 +2008,7 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_ERROR;
 	}
@@ -2029,15 +2029,15 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	rss_conf->rss_hf |=  rss_type.ipv4 ?
-		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
 	rss_conf->rss_hf |=  rss_type.ipv6 ?
-		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
-	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+	rss_conf->rss_hf |=  rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
 
 	return HINIC_OK;
 }
@@ -2067,7 +2067,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 	u16 i = 0;
 	u16 idx, shift;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
 		return HINIC_OK;
 
 	if (reta_size != NIC_RSS_INDIR_SIZE) {
@@ -2081,8 +2081,8 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 
 	/* update rss indir_tbl */
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
@@ -2147,8 +2147,8 @@ static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
 	}
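
The RTE_ETH_VLAN_*_MASK bits handled in hinic_vlan_offload_set() above are the
same ones an application drives at runtime; a hedged sketch, assuming the
companion RTE_ETH_VLAN_*_OFFLOAD renames from this series:

#include <rte_ethdev.h>

/* Enable VLAN stripping and filtering; bits left clear are disabled. */
static int
enable_vlan_offloads(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_offload(port_id,
			RTE_ETH_VLAN_STRIP_OFFLOAD |
			RTE_ETH_VLAN_FILTER_OFFLOAD);
}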
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 842399cc4cd8..d347afe9a6a9 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -504,14 +504,14 @@ static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
 {
 	u64 rss_hf = rss_conf->rss_hf;
 
-	rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 }
 
 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
@@ -588,8 +588,8 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 {
 	int err, i;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 		nic_dev->num_rss = 0;
 		if (nic_dev->num_rq > 1) {
 			/* get rss template id */
@@ -599,7 +599,7 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 				PMD_DRV_LOG(WARNING, "Alloc rss template failed");
 				return err;
 			}
-			nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+			nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
 			for (i = 0; i < nic_dev->num_rq; i++)
 				hinic_add_rq_to_rx_queue_list(nic_dev, i);
 		}
@@ -610,12 +610,12 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 
 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
 {
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (hinic_rss_template_free(nic_dev->hwdev,
 					    nic_dev->rss_tmpl_idx))
 			PMD_DRV_LOG(WARNING, "Free rss template failed");
 
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 	}
 }
 
@@ -641,7 +641,7 @@ int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
 	int ret = 0;
 
 	switch (dev_conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		ret = hinic_config_mq_rx_rss(nic_dev, on);
 		break;
 	default:
@@ -662,7 +662,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	int lro_wqe_num;
 	int buf_size;
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (rss_conf.rss_hf == 0) {
 			rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
 		} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
@@ -678,7 +678,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
 	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
@@ -687,7 +687,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 		goto rx_csum_ofl_err;
 
 	/* config lro */
-	lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+	lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
 			true : false;
 	max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
 	buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
@@ -726,7 +726,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		hinic_rss_deinit(nic_dev);
 		hinic_destroy_num_qps(nic_dev);
 	}
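
hinic_rx_configure() above keys off rxmode.mq_mode and rxmode.offloads; a
minimal application-side configuration sketch with the renamed values (queue
counts and the LRO size are arbitrary placeholders):

#include <rte_ethdev.h>

/* Configure a port for RSS with TCP LRO using the RTE_ETH_* names. */
static int
configure_rss_lro(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
			.offloads = RTE_ETH_RX_OFFLOAD_TCP_LRO |
				    RTE_ETH_RX_OFFLOAD_CHECKSUM,
			.max_lro_pkt_size = 16384,
		},
		.rx_adv_conf.rss_conf = {
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
		},
	};

	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}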
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 8a45f2d9fc50..5c303398b635 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -8,17 +8,17 @@
 #define HINIC_DEFAULT_RX_FREE_THRESH	32
 
 #define HINIC_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |\
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 enum rq_completion_fmt {
 	RQ_COMPLETE_SGE = 1
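
The HINIC_RSS_OFFLOAD_ALL mask above is built from the same RTE_ETH_RSS_* bits
an application sees when reading the active hash configuration back; a minimal
sketch (untested):

#include <rte_ethdev.h>

/* Return non-zero when IPv4/TCP hashing is currently enabled. */
static int
rss_hashes_ipv4_tcp(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };

	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) != 0)
		return 0;

	return (rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0;
}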
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index b71e2e9ea451..953c146d0200 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -1536,7 +1536,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 		if (dcb_rx_conf->nb_tcs == 0)
 			hw->dcb_info.pfc_en = 1; /* tc0 only */
@@ -1693,7 +1693,7 @@ hns3_update_queue_map_configure(struct hns3_adapter *hns)
 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		return 0;
 
 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
@@ -1713,22 +1713,22 @@ static void
 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
 {
 	switch (mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->requested_fc_mode = HNS3_FC_FULL;
 		break;
 	default:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
-			  "configured to RTE_FC_NONE", mode);
+			  "configured to RTE_ETH_FC_NONE", mode);
 		break;
 	}
 }
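
hns3_get_fc_mode() above maps the generic RTE_ETH_FC_* modes to driver state;
from the application side the same enum flows through the flow-control API, as
in this hedged sketch (error handling kept minimal):

#include <rte_ethdev.h>

/* Request full (Rx and Tx) pause-frame flow control. */
static int
enable_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}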
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 7d37004972bf..64d1da09a707 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -60,29 +60,29 @@ enum hns3_evt_cause {
 };
 
 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
-	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
 };
@@ -500,8 +500,8 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	struct hns3_cmd_desc desc;
 	int ret;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
 		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
 		return -EINVAL;
 	}
@@ -514,10 +514,10 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
 	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
@@ -725,11 +725,11 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rte_spinlock_lock(&hw->lock);
 	rxmode = &dev->data->dev_conf.rxmode;
 	tmp_mask = (unsigned int)mask;
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* ignore vlan filter configuration during promiscuous mode */
 		if (!dev->data->promiscuous) {
 			/* Enable or disable VLAN filter */
-			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
 				 true : false;
 
 			ret = hns3_enable_vlan_filter(hns, enable);
@@ -742,9 +742,9 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
 		    true : false;
 
 		ret = hns3_en_hw_strip_rxvtag(hns, enable);
@@ -1118,7 +1118,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
 				       RTE_ETHER_TYPE_VLAN);
 	if (ret) {
 		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
@@ -1161,7 +1161,7 @@ hns3_restore_vlan_conf(struct hns3_adapter *hns)
 	if (!hw->data->promiscuous) {
 		/* restore vlan filter states */
 		offloads = hw->data->dev_conf.rxmode.offloads;
-		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
 		ret = hns3_enable_vlan_filter(hns, enable);
 		if (ret) {
 			hns3_err(hw, "failed to restore vlan rx filter conf, "
@@ -1204,7 +1204,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
 			  txmode->hw_vlan_reject_untagged);
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	ret = hns3_vlan_offload_set(dev, mask);
 	if (ret) {
 		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
@@ -2218,9 +2218,9 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 	int max_tc = 0;
 	int i;
 
-	if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
-	    (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
-	     tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
 		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
 			 rx_mq_mode, tx_mq_mode);
 		return -EOPNOTSUPP;
@@ -2228,7 +2228,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
 			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
 				 dcb_rx_conf->nb_tcs, pf->tc_max);
@@ -2237,7 +2237,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
 		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
-			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
 				 "nb_tcs(%d) != %d or %d in rx direction.",
 				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
 			return -EINVAL;
@@ -2380,7 +2380,7 @@ hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
 	uint16_t mtu;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME))
 		return 0;
 
 	/*
@@ -2440,11 +2440,11 @@ hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
 	 * configure link_speeds (default 0), which means auto-negotiation.
 	 * In this case, it should return success.
 	 */
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
 	    hw->mac.support_autoneg == 0)
 		return 0;
 
-	if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
 		ret = hns3_check_port_speed(hw, link_speeds);
 		if (ret)
 			return ret;
@@ -2504,15 +2504,15 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		goto cfg_err;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_setup_dcb(dev);
 		if (ret)
 			goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect packets to queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		hw->rss_dis_flag = false;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -2533,7 +2533,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -2633,10 +2633,10 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (is_jumbo_frame)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 	rte_spinlock_unlock(&hw->lock);
 
@@ -2649,15 +2649,15 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 
 	return speed_capa;
 }
@@ -2668,19 +2668,19 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	return speed_capa;
 }
@@ -2699,7 +2699,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)
 			hns3_get_firber_port_speed_capa(mac->supported_speed);
 
 	if (mac->support_autoneg == 0)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -2725,41 +2725,41 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_KEEP_CRC |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_JUMBO_FRAME |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_outer_udp_cksum_supported(hw))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_indep_txrx_supported(hw))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
 	if (hns3_dev_ptp_supported(hw))
-		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
@@ -2843,7 +2843,7 @@ hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
 
 	ret = hns3_update_link_info(eth_dev);
 	if (ret)
-		hw->mac.link_status = ETH_LINK_DOWN;
+		hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	return ret;
 }
@@ -2856,29 +2856,29 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link->link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link->link_speed = ETH_SPEED_NUM_NONE;
+		new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link->link_duplex = mac->link_duplex;
-	new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link->link_autoneg = mac->link_autoneg;
 }
 
@@ -2898,8 +2898,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 	if (eth_dev->data->dev_started == 0) {
 		new_link.link_autoneg = mac->link_autoneg;
 		new_link.link_duplex = mac->link_duplex;
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
-		new_link.link_status = ETH_LINK_DOWN;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		new_link.link_status = RTE_ETH_LINK_DOWN;
 		goto out;
 	}
 
@@ -2911,7 +2911,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 			break;
 		}
 
-		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+		if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
 			break;
 
 		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
@@ -3257,31 +3257,31 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
 {
 	switch (speed_cmd) {
 	case HNS3_CFG_SPEED_10M:
-		*speed = ETH_SPEED_NUM_10M;
+		*speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case HNS3_CFG_SPEED_100M:
-		*speed = ETH_SPEED_NUM_100M;
+		*speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HNS3_CFG_SPEED_1G:
-		*speed = ETH_SPEED_NUM_1G;
+		*speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HNS3_CFG_SPEED_10G:
-		*speed = ETH_SPEED_NUM_10G;
+		*speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HNS3_CFG_SPEED_25G:
-		*speed = ETH_SPEED_NUM_25G;
+		*speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HNS3_CFG_SPEED_40G:
-		*speed = ETH_SPEED_NUM_40G;
+		*speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HNS3_CFG_SPEED_50G:
-		*speed = ETH_SPEED_NUM_50G;
+		*speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HNS3_CFG_SPEED_100G:
-		*speed = ETH_SPEED_NUM_100G;
+		*speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HNS3_CFG_SPEED_200G:
-		*speed = ETH_SPEED_NUM_200G;
+		*speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	default:
 		return -EINVAL;
@@ -3610,39 +3610,39 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
 	hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
 
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
 		break;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
 		break;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
 		break;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
 		break;
@@ -4305,14 +4305,14 @@ hns3_mac_init(struct hns3_hw *hw)
 	int ret;
 
 	pf->support_sfp_query = true;
-	mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+	mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
 		return ret;
 	}
 
-	mac->link_status = ETH_LINK_DOWN;
+	mac->link_status = RTE_ETH_LINK_DOWN;
 
 	return hns3_config_mtu(hw, pf->mps);
 }
@@ -4562,7 +4562,7 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	 * all packets coming in the receiving direction.
 	 */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, false);
 		if (ret) {
 			hns3_err(hw, "failed to enable promiscuous mode due to "
@@ -4603,7 +4603,7 @@ hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	}
 	/* when promiscuous mode was disabled, restore the vlan filter status */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, true);
 		if (ret) {
 			hns3_err(hw, "failed to disable promiscuous mode due to"
@@ -4723,8 +4723,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 		mac_info->supported_speed =
 					rte_le_to_cpu_32(resp->supported_speed);
 		mac_info->support_autoneg = resp->autoneg_ability;
-		mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
-					: ETH_LINK_AUTONEG;
+		mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+					: RTE_ETH_LINK_AUTONEG;
 	} else {
 		mac_info->query_type = HNS3_DEFAULT_QUERY;
 	}
@@ -4735,8 +4735,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 static uint8_t
 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
 {
-	if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
-		duplex = ETH_LINK_FULL_DUPLEX;
+	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return duplex;
 }
@@ -4786,7 +4786,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 		return ret;
 
 	/* Do nothing if no SFP */
-	if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
 		return 0;
 
 	/*
@@ -4813,7 +4813,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 
 	/* Config full duplex for SFP */
 	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
-				      ETH_LINK_FULL_DUPLEX);
+				      RTE_ETH_LINK_FULL_DUPLEX);
 }
 
 static void
@@ -4932,10 +4932,10 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
 	hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
 
 	/*
-	 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+	 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
 	 * when receiving frames. Otherwise, CRC will be stripped.
 	 */
-	if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
 	else
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
@@ -4963,7 +4963,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret) {
 		hns3_err(hw, "get link status cmd failed %d", ret);
-		return ETH_LINK_DOWN;
+		return RTE_ETH_LINK_DOWN;
 	}
 
 	req = (struct hns3_link_status_cmd *)desc.data;
@@ -5145,19 +5145,19 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		return HNS3_FIBER_LINK_SPEED_1G_BIT;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		return HNS3_FIBER_LINK_SPEED_10G_BIT;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		return HNS3_FIBER_LINK_SPEED_25G_BIT;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		return HNS3_FIBER_LINK_SPEED_40G_BIT;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		return HNS3_FIBER_LINK_SPEED_50G_BIT;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		return HNS3_FIBER_LINK_SPEED_100G_BIT;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		return HNS3_FIBER_LINK_SPEED_200G_BIT;
 	default:
 		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
@@ -5395,20 +5395,20 @@ hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_10M:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_10M:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
 		break;
-	case ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
 		break;
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
 		break;
 	default:
@@ -5424,26 +5424,26 @@ hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_1G:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
 		break;
 	default:
@@ -5478,28 +5478,28 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
 static inline uint32_t
 hns3_get_link_speed(uint32_t link_speeds)
 {
-	uint32_t speed = ETH_SPEED_NUM_NONE;
-
-	if (link_speeds & ETH_LINK_SPEED_10M ||
-	    link_speeds & ETH_LINK_SPEED_10M_HD)
-		speed = ETH_SPEED_NUM_10M;
-	if (link_speeds & ETH_LINK_SPEED_100M ||
-	    link_speeds & ETH_LINK_SPEED_100M_HD)
-		speed = ETH_SPEED_NUM_100M;
-	if (link_speeds & ETH_LINK_SPEED_1G)
-		speed = ETH_SPEED_NUM_1G;
-	if (link_speeds & ETH_LINK_SPEED_10G)
-		speed = ETH_SPEED_NUM_10G;
-	if (link_speeds & ETH_LINK_SPEED_25G)
-		speed = ETH_SPEED_NUM_25G;
-	if (link_speeds & ETH_LINK_SPEED_40G)
-		speed = ETH_SPEED_NUM_40G;
-	if (link_speeds & ETH_LINK_SPEED_50G)
-		speed = ETH_SPEED_NUM_50G;
-	if (link_speeds & ETH_LINK_SPEED_100G)
-		speed = ETH_SPEED_NUM_100G;
-	if (link_speeds & ETH_LINK_SPEED_200G)
-		speed = ETH_SPEED_NUM_200G;
+	uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+	if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+		speed = RTE_ETH_SPEED_NUM_10M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+		speed = RTE_ETH_SPEED_NUM_100M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+		speed = RTE_ETH_SPEED_NUM_1G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+		speed = RTE_ETH_SPEED_NUM_10G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+		speed = RTE_ETH_SPEED_NUM_25G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+		speed = RTE_ETH_SPEED_NUM_40G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+		speed = RTE_ETH_SPEED_NUM_50G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+		speed = RTE_ETH_SPEED_NUM_100G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+		speed = RTE_ETH_SPEED_NUM_200G;
 
 	return speed;
 }
@@ -5507,11 +5507,11 @@ hns3_get_link_speed(uint32_t link_speeds)
 static uint8_t
 hns3_get_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-	    (link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+	    (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 static int
@@ -5645,9 +5645,9 @@ hns3_apply_link_speed(struct hns3_hw *hw)
 	struct hns3_set_link_speed_cfg cfg;
 
 	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
-	cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
-			ETH_LINK_AUTONEG : ETH_LINK_FIXED;
-	if (cfg.autoneg != ETH_LINK_AUTONEG) {
+	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+			RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
 		cfg.speed = hns3_get_link_speed(conf->link_speeds);
 		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
 	}
@@ -5920,7 +5920,7 @@ hns3_do_stop(struct hns3_adapter *hns)
 	ret = hns3_cfg_mac_mode(hw, false);
 	if (ret)
 		return ret;
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
@@ -6131,17 +6131,17 @@ hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	current_mode = hns3_get_current_fc_mode(dev);
 	switch (current_mode) {
 	case HNS3_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case HNS3_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HNS3_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case HNS3_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	}
 
@@ -6287,7 +6287,7 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	int i;
 
 	rte_spinlock_lock(&hw->lock);
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = pf->local_max_tc;
 	else
 		dcb_info->nb_tcs = 1;
@@ -6587,7 +6587,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 		hns3_update_linkstatus_and_event(hw, false);
@@ -6877,7 +6877,7 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
 	 * in device of link speed
 	 * below 10 Gbps.
 	 */
-	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
 		*state = 0;
 		return 0;
 	}
@@ -6909,7 +6909,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
 	 * configured FEC mode is returned.
 	 * If link is up, current FEC mode is returned.
 	 */
-	if (hw->mac.link_status == ETH_LINK_DOWN) {
+	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
 		ret = get_current_fec_auto_state(hw, &auto_state);
 		if (ret)
 			return ret;
@@ -7008,12 +7008,12 @@ get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
 	uint32_t cur_capa;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		cur_capa = fec_capa[1].capa;
 		break;
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		cur_capa = fec_capa[0].capa;
 		break;
 	default:
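
For application code consuming these renamed values, here is a minimal
sketch of reading the negotiated flow-control mode back with the new
RTE_ETH_FC_* names (illustrative only, not part of this patch; port_id is
assumed to be a configured port):

#include <string.h>
#include <rte_ethdev.h>

static const char *
fc_mode_str(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
		return "unknown";

	switch (fc_conf.mode) {
	case RTE_ETH_FC_FULL:
		return "full";
	case RTE_ETH_FC_TX_PAUSE:
		return "tx-pause";
	case RTE_ETH_FC_RX_PAUSE:
		return "rx-pause";
	case RTE_ETH_FC_NONE:
	default:
		return "none";
	}
}
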
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 0e4e4269a12f..c40d28af1d46 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -191,10 +191,10 @@ struct hns3_mac {
 	bool default_addr_setted; /* whether default addr(mac_addr) is set */
 	uint8_t media_type;
 	uint8_t phy_addr;
-	uint8_t link_duplex  : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
-	uint8_t link_status  : 1; /* ETH_LINK_[DOWN/UP] */
-	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
+	uint8_t link_duplex  : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint8_t link_status  : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;      /* RTE_ETH_SPEED_NUM_ */
 	/*
 	 * Some firmware versions support only the SFP speed query. In addition
 	 * to the SFP speed query, some firmware supports the query of the speed
@@ -1114,9 +1114,9 @@ static inline uint64_t
 hns3_txvlan_cap_get(struct hns3_hw *hw)
 {
 	if (hw->port_base_vlan_cfg.state)
-		return DEV_TX_OFFLOAD_VLAN_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	else
-		return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 }
 
 #endif /* _HNS3_ETHDEV_H_ */
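
The capability returned by hns3_txvlan_cap_get() above ends up in
dev_info.tx_offload_capa, so an application can probe it with the renamed
flags. A minimal sketch (illustrative, not part of this patch):

#include <rte_ethdev.h>

/* Return non-zero when the port can insert both VLAN and QinQ tags. */
static int
port_supports_qinq_insert(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint64_t need = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
			RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	return (dev_info.tx_offload_capa & need) == need;
}
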
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 8d9b7979c806..53d79bb2106c 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -809,15 +809,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	hw->adapter_state = HNS3_NIC_CONFIGURING;
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
 		ret = -EINVAL;
 		goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -829,7 +829,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	 * If jumbo frames are enabled, MTU needs to be refreshed
 	 * according to the maximum RX packet length.
 	 */
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
 		if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
 		    max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
@@ -853,7 +853,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -931,10 +931,10 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 	if (mtu > RTE_ETHER_MTU)
 		dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+						~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 	rte_spinlock_unlock(&hw->lock);
 
@@ -963,33 +963,33 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_JUMBO_FRAME |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_outer_udp_cksum_supported(hw))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_indep_txrx_supported(hw))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1669,10 +1669,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	tmp_mask = (unsigned int)mask;
 
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN filter */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = hns3vf_en_vlan_filter(hw, true);
 		else
 			ret = hns3vf_en_vlan_filter(hw, false);
@@ -1682,10 +1682,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Vlan stripping setting */
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
 		else
 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1753,7 +1753,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
 	int ret;
 
 	dev_conf = &hw->data->dev_conf;
-	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
 								   : false;
 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
 	if (ret)
@@ -1778,8 +1778,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
 	}
 
 	/* Apply vlan offload setting */
-	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK);
+	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
 
@@ -2088,7 +2088,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	/*
 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2247,31 +2247,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link.link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link.link_duplex = mac->link_duplex;
-	new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -2599,11 +2599,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 		 * Make sure the link status is updated before hns3vf_stop_poll_job,
 		 * because updating the link status depends on the polling job.
 		 */
-		hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
 					  hw->mac.link_duplex);
 		hns3vf_stop_poll_job(eth_dev);
 	}
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
 	rte_wmb();
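
On the application side, the renamed link macros are consumed through
struct rte_eth_link. A minimal sketch (illustrative, not part of this
patch):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: up, %u Mbps, %s\n", port_id, link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
	else
		printf("port %u: down\n", port_id);
}
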
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index fc77979c5f14..0ac8705b590b 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1298,10 +1298,10 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
 	 * Kunpeng930 and future kunpeng series support to use src/dst port
 	 * fields to RSS hash for IPv6 SCTP packet type.
 	 */
-	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	    (rss->types & ETH_RSS_IP ||
+	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+	    (rss->types & RTE_ETH_RSS_IP ||
 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
-	    rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 		return false;
 
 	return true;
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
index df8485904688..395590c86c03 100644
--- a/drivers/net/hns3/hns3_ptp.c
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -21,7 +21,7 @@ hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		return 0;
 
 	ret = rte_mbuf_dyn_rx_timestamp_register
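
For completeness, the offload checked above is requested at configure time;
a minimal sketch (illustrative, and assuming the port advertises
RTE_ETH_RX_OFFLOAD_TIMESTAMP in dev_info.rx_offload_capa):

struct rte_eth_conf port_conf = {
	.rxmode = {
		.offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP,
	},
};

/* ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */
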
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index 3a81e90e0911..85495bbe89d9 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -76,69 +76,69 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_tuple_table[] = {
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
 };
 
@@ -146,44 +146,44 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_rss_types[] = {
-	{ ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+	{ RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+	{ RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
 };
@@ -365,10 +365,10 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
 	 * When the user does not specify the following types or a combination
 	 * of them, all fields are enabled for the supported RSS types. The
 	 * following types are:
-	 * - ETH_RSS_L3_SRC_ONLY
-	 * - ETH_RSS_L3_DST_ONLY
-	 * - ETH_RSS_L4_SRC_ONLY
-	 * - ETH_RSS_L4_DST_ONLY
+	 * - RTE_ETH_RSS_L3_SRC_ONLY
+	 * - RTE_ETH_RSS_L3_DST_ONLY
+	 * - RTE_ETH_RSS_L4_SRC_ONLY
+	 * - RTE_ETH_RSS_L4_DST_ONLY
 	 */
 	if (fields_count == 0) {
 		for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
@@ -520,8 +520,8 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
 	memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl,
 	       sizeof(rss_cfg->rss_indirection_tbl));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
 			rte_spinlock_unlock(&hw->lock);
 			hns3_err(hw, "queue id(%u) set to redirection table "
@@ -572,8 +572,8 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	rte_spinlock_lock(&hw->lock);
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] =
 						rss_cfg->rss_indirection_tbl[i];
@@ -692,7 +692,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	}
 
 	/* When RSS is off, redirect the packet queue 0 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
 		hns3_rss_uninit(hns);
 
 	/* Configure RSS hash algorithm and hash key offset */
@@ -709,7 +709,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	 * When RSS is off, it doesn't need to configure rss redirection table
 	 * to hardware.
 	 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
 					       hw->rss_ind_tbl_size);
 		if (ret)
@@ -723,7 +723,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	return ret;
 
 rss_indir_table_uninit:
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret1 = hns3_rss_reset_indir_table(hw);
 		if (ret1 != 0)
 			return ret;
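
The idx/shift arithmetic above is also how applications fill the 64-entry
RETA groups. A minimal round-robin sketch (illustrative, not part of this
patch; assumes reta_size <= RTE_ETH_RSS_RETA_SIZE_512):

#include <string.h>
#include <rte_ethdev.h>

static int
reta_round_robin(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
						  RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
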
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 996083b88b25..6f153a1b7bfb 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -8,20 +8,20 @@
 #include <rte_flow.h>
 
 #define HNS3_ETH_RSS_SUPPORT ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L3_SRC_ONLY | \
-	ETH_RSS_L3_DST_ONLY | \
-	ETH_RSS_L4_SRC_ONLY | \
-	ETH_RSS_L4_DST_ONLY)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L3_SRC_ONLY | \
+	RTE_ETH_RSS_L3_DST_ONLY | \
+	RTE_ETH_RSS_L4_SRC_ONLY | \
+	RTE_ETH_RSS_L4_DST_ONLY)
 
 #define HNS3_RSS_IND_TBL_SIZE	512 /* The size of hash lookup table */
 #define HNS3_RSS_IND_TBL_SIZE_MAX 2048
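
Hash types from the HNS3_ETH_RSS_SUPPORT set are requested by applications
through rss_conf.rss_hf. A minimal sketch with the renamed flags
(illustrative; in practice rss_hf should first be masked against
dev_info.flow_type_rss_offloads):

struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL, /* use the PMD default key */
			.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
				  RTE_ETH_RSS_L4_SRC_ONLY |
				  RTE_ETH_RSS_L4_DST_ONLY,
		},
	},
};
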
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 0f222b37f9d1..01e43791572b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1912,7 +1912,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* CRC len set here is used for amending packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1957,7 +1957,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
 						 rxq->rx_buf_len);
 	}
 
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 	    dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
 		dev->data->scattered_rx = true;
 }
@@ -2833,7 +2833,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = !dev->data->scattered_rx &&
-			 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+			 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
 
 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_recv_pkts_vec;
@@ -3127,7 +3127,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
 	int ret;
 
 	offloads = hw->data->dev_conf.rxmode.offloads;
-	gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
@@ -4279,7 +4279,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	if (hns3_dev_ptp_supported(hw))
 		return false;
 
-	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
 }
 
 static bool
@@ -4291,16 +4291,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 	return true;
 #else
 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
-		DEV_TX_OFFLOAD_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_CKSUM | \
-		DEV_TX_OFFLOAD_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_SCTP_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
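
The test in hns3_tx_check_simple_support() above uses the subset idiom:
"offloads == (offloads & MASK)" is true exactly when no bit outside MASK is
set. An equivalent spelling of the same check (illustrative only):

	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	/* No TX offload other than MBUF_FAST_FREE may be enabled. */
	return (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) == 0;
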
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index cd7c21c1d0c8..2fa3a01dd3bf 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -307,7 +307,7 @@ struct hns3_rx_queue {
 	uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
 	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */
 
-	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+	/* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
 	/*
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index 844512f6ceec..d01a8d62bfb1 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -22,8 +22,8 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	if (hns3_dev_ptp_supported(hw))
 		return -ENOTSUP;
 
-	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
-	if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	/* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		return -ENOTSUP;
 
 	return 0;
@@ -228,10 +228,10 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
 int
 hns3_rx_check_vec_support(struct rte_eth_dev *dev)
 {
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
-				 DEV_RX_OFFLOAD_VLAN;
+	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				 RTE_ETH_RX_OFFLOAD_VLAN;
 
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	if (hns3_dev_ptp_supported(hw))
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7b230e2ed17a..c199a87c6df4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1641,7 +1641,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 
 	/* Set the global registers with default ether type value */
 	if (!pf->support_multi_driver) {
-		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					 RTE_ETHER_TYPE_VLAN);
 		if (ret != I40E_SUCCESS) {
 			PMD_INIT_LOG(ERR,
@@ -1909,8 +1909,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Only legacy filter API needs the following fdir config. So when the
 	 * legacy filter API is deprecated, the following codes should also be
@@ -1944,13 +1944,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	 *  number, which will be available after rx_queue_setup(). dev_start()
 	 *  function is good to place RSS setup.
 	 */
-	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
 		ret = i40e_vmdq_setup(dev);
 		if (ret)
 			goto err;
 	}
 
-	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = i40e_dcb_setup(dev);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
@@ -2227,17 +2227,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 {
 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
 
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed |= I40E_LINK_SPEED_40GB;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed |= I40E_LINK_SPEED_25GB;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed |= I40E_LINK_SPEED_20GB;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed |= I40E_LINK_SPEED_10GB;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed |= I40E_LINK_SPEED_1GB;
-	if (link_speeds & ETH_LINK_SPEED_100M)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 		link_speed |= I40E_LINK_SPEED_100MB;
 
 	return link_speed;
@@ -2345,13 +2345,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
 		     I40E_AQ_PHY_LINK_ENABLED;
 
-	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
-		conf->link_speeds = ETH_LINK_SPEED_40G |
-				    ETH_LINK_SPEED_25G |
-				    ETH_LINK_SPEED_20G |
-				    ETH_LINK_SPEED_10G |
-				    ETH_LINK_SPEED_1G |
-				    ETH_LINK_SPEED_100M;
+	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+				    RTE_ETH_LINK_SPEED_25G |
+				    RTE_ETH_LINK_SPEED_20G |
+				    RTE_ETH_LINK_SPEED_10G |
+				    RTE_ETH_LINK_SPEED_1G |
+				    RTE_ETH_LINK_SPEED_100M;
 
 		abilities |= I40E_AQ_PHY_AN_ENABLED;
 	} else {
@@ -2910,34 +2910,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	/* Parse the link status */
 	switch (link_speed) {
 	case I40E_REG_SPEED_0:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_REG_SPEED_1:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_REG_SPEED_2:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_REG_SPEED_3:
 		if (hw->mac.type == I40E_MAC_X722) {
-			link->link_speed = ETH_SPEED_NUM_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		} else {
 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
 
 			if (reg_val & I40E_REG_MACC_25GB)
-				link->link_speed = ETH_SPEED_NUM_25G;
+				link->link_speed = RTE_ETH_SPEED_NUM_25G;
 			else
-				link->link_speed = ETH_SPEED_NUM_40G;
+				link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		}
 		break;
 	case I40E_REG_SPEED_4:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
-			link->link_speed = ETH_SPEED_NUM_20G;
+			link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2964,8 +2964,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 		status = i40e_aq_get_link_info(hw, enable_lse,
 						&link_status, NULL);
 		if (unlikely(status != I40E_SUCCESS)) {
-			link->link_speed = ETH_SPEED_NUM_NONE;
-			link->link_duplex = ETH_LINK_FULL_DUPLEX;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			return;
 		}
@@ -2980,28 +2980,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		link->link_speed = ETH_SPEED_NUM_20G;
+		link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (link->link_status)
-			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			link->link_speed = ETH_SPEED_NUM_NONE;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 }
@@ -3018,9 +3018,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	memset(&link, 0, sizeof(link));
 
 	/* i40e uses full duplex only */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	if (!wait_to_complete && !enable_lse)
 		update_link_reg(hw, &link);
@@ -3748,34 +3748,34 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_RSS_HASH;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -3834,7 +3834,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
 		/* For XL710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_40G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
 		dev_info->default_rxportconf.nb_queues = 2;
 		dev_info->default_txportconf.nb_queues = 2;
 		if (dev->data->nb_rx_queues == 1)
@@ -3848,17 +3848,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
 		/* For XXV710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_25G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
 		dev_info->default_rxportconf.ring_size = 256;
 		dev_info->default_txportconf.ring_size = 256;
 	} else {
 		/* For X710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
-		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
 			dev_info->default_rxportconf.ring_size = 512;
 			dev_info->default_txportconf.ring_size = 256;
 		} else {
@@ -3897,7 +3897,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
 	int ret;
 
 	if (qinq) {
-		if (vlan_type == ETH_VLAN_TYPE_OUTER)
+		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 			reg_id = 2;
 	}
 
@@ -3944,12 +3944,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
-	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -3963,12 +3963,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	/* 802.1ad frames ability is added in NVM API 1.7*/
 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
 		if (qinq) {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->first_tag = rte_cpu_to_le_16(tpid);
-			else if (vlan_type == ETH_VLAN_TYPE_INNER)
+			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		} else {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		}
 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
@@ -4027,37 +4027,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					   RTE_ETHER_TYPE_VLAN);
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					   RTE_ETHER_TYPE_VLAN);
 		}
 		else
 			i40e_vsi_config_double_vlan(vsi, FALSE);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
 		/* Enable or disable outer VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
@@ -4140,17 +4140,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	 /* Return current mode according to actual setting */
 	switch (hw->fc.current_mode) {
 	case I40E_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case I40E_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case I40E_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case I40E_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	};
 
 	return 0;
@@ -4166,10 +4166,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	struct i40e_hw *hw;
 	struct i40e_pf *pf;
 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
-		[RTE_FC_NONE] = I40E_FC_NONE,
-		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
-		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
-		[RTE_FC_FULL] = I40E_FC_FULL
+		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
+		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+		[RTE_ETH_FC_FULL] = I40E_FC_FULL
 	};
 
 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
@@ -4316,7 +4316,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4469,7 +4469,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4485,8 +4485,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4512,7 +4512,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4529,8 +4529,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -4847,7 +4847,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
 				hw->func_caps.num_vsis - vsi_count);
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-				ETH_64_POOLS);
+				RTE_ETH_64_POOLS);
 			if (pf->max_nb_vmdq_vsi) {
 				pf->flags |= I40E_FLAG_VMDQ;
 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -6132,10 +6132,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	int mask = 0;
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK |
-	       ETH_QINQ_STRIP_MASK |
-	       ETH_VLAN_FILTER_MASK |
-	       ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+	       RTE_ETH_QINQ_STRIP_MASK |
+	       RTE_ETH_VLAN_FILTER_MASK |
+	       RTE_ETH_VLAN_EXTEND_MASK;
 	ret = i40e_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6262,9 +6262,9 @@ i40e_pf_setup(struct i40e_pf *pf)
 
 	/* Configure filter control */
 	memset(&settings, 0, sizeof(settings));
-	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
-	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 	else {
 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -7117,7 +7117,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
 {
 	uint32_t vid_idx, vid_bit;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return 0;
 
 	vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7152,7 +7152,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
 	int ret;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return;
 
 	i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -8730,16 +8730,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8765,12 +8765,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8862,7 +8862,7 @@ int
 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->adapter->hw;
-	uint8_t lut[ETH_RSS_RETA_SIZE_512];
+	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 	uint32_t i;
 	int num;
 
@@ -8870,7 +8870,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 	 * configured. It's necessary to calculate the actual PF
 	 * queues that are configured.
 	 */
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		num = i40e_pf_calc_configured_queues_num(pf);
 	else
 		num = pf->dev_data->nb_rx_queues;
@@ -8949,7 +8949,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
-	    !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return 0;
 
 	hw = I40E_PF_TO_HW(pf);
@@ -10412,8 +10412,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
 		return I40E_ERR_NO_MEMORY;
 	}
 	switch (mirror_conf->rule_type) {
-	case ETH_MIRROR_VLAN:
-		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
+	case RTE_ETH_MIRROR_VLAN:
+		for (i = 0, j = 0; i < RTE_ETH_MIRROR_MAX_VLANS; i++) {
 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
 				mirr_rule->entries[j] =
 					mirror_conf->vlan.vlan_id[i];
@@ -10427,8 +10427,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
 		}
 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
 		break;
-	case ETH_MIRROR_VIRTUAL_POOL_UP:
-	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
+	case RTE_ETH_MIRROR_VIRTUAL_POOL_UP:
+	case RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN:
 		/* check if the specified pool bit is out of range */
 		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
 			PMD_DRV_LOG(ERR, "pool mask is out of range.");
@@ -10453,15 +10453,15 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
 		}
 		/* egress and ingress in aq commands means from switch but not port */
 		mirr_rule->rule_type =
-			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
+			(mirror_conf->rule_type == RTE_ETH_MIRROR_VIRTUAL_POOL_UP) ?
 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
 		break;
-	case ETH_MIRROR_UPLINK_PORT:
+	case RTE_ETH_MIRROR_UPLINK_PORT:
 		/* egress and ingress in aq commands means from switch but not port*/
 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
 		break;
-	case ETH_MIRROR_DOWNLINK_PORT:
+	case RTE_ETH_MIRROR_DOWNLINK_PORT:
 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
 		break;
 	default:
@@ -10603,16 +10603,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_25G:
 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
 		break;
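
The same RTE_ETH_SPEED_NUM_* values are what applications read back from
the link API. A minimal sketch, assuming port_id refers to a started port:

#include <stdio.h>
#include <rte_ethdev.h>

struct rte_eth_link link;

/* Sketch: non-blocking link query; link_speed is in Mbps, so the
 * RTE_ETH_SPEED_NUM_* constants compare directly. */
if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
    link.link_status == RTE_ETH_LINK_UP &&
    link.link_speed >= RTE_ETH_SPEED_NUM_10G)
	printf("port %u is up at %u Mbps\n", port_id, link.link_speed);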
@@ -10840,7 +10840,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
 	else
 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
 
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_cfg->pfc.willing = 0;
 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 		dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11348,7 +11348,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint16_t bsf, tc_mapping;
 	int i, j = 0;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
 	else
 		dcb_info->nb_tcs = 1;
@@ -11396,7 +11396,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
 		}
 		j++;
-	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
 	return 0;
 }
 
@@ -11774,10 +11774,10 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > I40E_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
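
For context, the mtu_set path above mirrors what an application sets at
configure time. A minimal sketch against the pre-21.11 rxmode fields used
in this patch (max_rx_pkt_len and the JUMBO_FRAME offload are slated for
removal later), assuming port_id and a single Rx/Tx queue pair:

#include <rte_ethdev.h>

struct rte_eth_conf conf = {
	.rxmode = {
		.max_rx_pkt_len = 9000,	/* jumbo frame size in bytes */
		.offloads = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME,
	},
};
int ret = rte_eth_dev_configure(port_id, 1, 1, &conf);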
 
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index cd6deabd60b3..f21c2de6bdb9 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -139,17 +139,17 @@ enum i40e_flxpld_layer_idx {
 		       I40E_FLAG_RSS_AQ_CAPABLE)
 
 #define I40E_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /* All bits of RSS hash enable for X722*/
 #define I40E_RSS_HENA_ALL_X722 ( \
@@ -1076,7 +1076,7 @@ struct i40e_rte_flow_rss_conf {
 	uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
 		     I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		    sizeof(uint32_t)];		/**< Hash key. */
-	uint16_t queue[ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
+	uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512];	/**< Queue indices to use. */
 
 	bool symmetric_enable;		/**< true, if enable symmetric */
 	uint64_t config_pctypes;	/**< All PCTYPES with the flow  */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 0cfe13b7b227..cda426fe5614 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1077,7 +1077,7 @@ i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 	 * VLAN_STRIP by default. So reconfigure the vlan_offload
 	 * as it was done by the app earlier.
 	 */
-	err = i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+	err = i40evf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to set vlan_strip");
 
@@ -1403,28 +1403,28 @@ i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
 				pf_msg->event_data.link_event_adv.link_status;
 
 			switch (pf_msg->event_data.link_event_adv.link_speed) {
-			case ETH_SPEED_NUM_100M:
+			case RTE_ETH_SPEED_NUM_100M:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_100MB;
 				break;
-			case ETH_SPEED_NUM_1G:
+			case RTE_ETH_SPEED_NUM_1G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_1GB;
 				break;
-			case ETH_SPEED_NUM_2_5G:
+			case RTE_ETH_SPEED_NUM_2_5G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_2_5GB;
 				break;
-			case ETH_SPEED_NUM_5G:
+			case RTE_ETH_SPEED_NUM_5G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_5GB;
 				break;
-			case ETH_SPEED_NUM_10G:
+			case RTE_ETH_SPEED_NUM_10G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_10GB;
 				break;
-			case ETH_SPEED_NUM_20G:
+			case RTE_ETH_SPEED_NUM_20G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_20GB;
 				break;
-			case ETH_SPEED_NUM_25G:
+			case RTE_ETH_SPEED_NUM_25G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_25GB;
 				break;
-			case ETH_SPEED_NUM_40G:
+			case RTE_ETH_SPEED_NUM_40G:
 				vf->link_speed = VIRTCHNL_LINK_SPEED_40GB;
 				break;
 			default:
@@ -1770,7 +1770,7 @@ static int
 i40evf_init_vlan(struct rte_eth_dev *dev)
 {
 	/* Apply vlan offload setting */
-	i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+	i40evf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK);
 
 	return 0;
 }
@@ -1785,9 +1785,9 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40evf_enable_vlan_strip(dev);
 		else
 			i40evf_disable_vlan_strip(dev);
@@ -1933,7 +1933,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 	/**
 	 * Check if the jumbo frame and maximum packet length are set correctly
 	 */
-	if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
 		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1954,7 +1954,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 		}
 	}
 
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -2290,35 +2290,35 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
 	/* Linux driver PF host */
 	switch (vf->link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (vf->link_up)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			new_link.link_speed = ETH_SPEED_NUM_NONE;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 	/* full duplex only */
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-		!(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+		!(dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -2367,36 +2367,36 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	dev_info->tx_queue_offload_capa = 0;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
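
A usage note on the capability flags above: applications are expected to
probe them before requesting offloads. A minimal sketch, assuming port_id
and that port_conf is the rte_eth_conf being prepared elsewhere:

#include <rte_ethdev.h>

struct rte_eth_dev_info dev_info;
uint64_t wanted = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		  RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

/* Sketch: only request offloads the port actually advertises. */
if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
    (dev_info.rx_offload_capa & wanted) == wanted)
	port_conf.rxmode.offloads |= wanted;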
@@ -2596,10 +2596,10 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t i, idx, shift;
 	int ret;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_64) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_64) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number of hardware can "
-			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
+			"support (%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_64);
 		return -EINVAL;
 	}
 
@@ -2612,8 +2612,8 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
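
The renamed RTE_ETH_RETA_GROUP_SIZE appears in the application-side
indexing as well. A minimal sketch filling the 64-entry table this VF
advertises, assuming port_id and nb_queues are defined:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: spread the redirection table round-robin over nb_queues Rx
 * queues; each rte_eth_rss_reta_entry64 holds one group of 64 entries. */
struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_64 /
					  RTE_ETH_RETA_GROUP_SIZE];
uint16_t i, idx, shift;

memset(reta_conf, 0, sizeof(reta_conf));
for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_64; i++) {
	idx = i / RTE_ETH_RETA_GROUP_SIZE;
	shift = i % RTE_ETH_RETA_GROUP_SIZE;
	reta_conf[idx].mask |= 1ULL << shift;
	reta_conf[idx].reta[shift] = i % nb_queues;
}
int ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
				      RTE_ETH_RSS_RETA_SIZE_64);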
@@ -2635,10 +2635,10 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	uint8_t *lut;
 	int ret;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_64) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_64) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number of hardware can "
-			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
+			"support (%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_64);
 		return -EINVAL;
 	}
 
@@ -2652,8 +2652,8 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -2770,7 +2770,7 @@ i40evf_config_rss(struct i40e_vf *vf)
 	uint8_t *lut_info;
 	int ret;
 
-	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (vf->dev_data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		i40evf_disable_rss(vf);
 		PMD_DRV_LOG(DEBUG, "RSS not configured");
 		return 0;
@@ -2887,10 +2887,10 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > I40E_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
 	return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3c1570bd9c47..d1cb992be61d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2015,7 +2015,7 @@ i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_VLAN_EXTEND;
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
index 1fb8c9abfcc6..3755d4d3fe2a 100644
--- a/drivers/net/i40e/i40e_hash.c
+++ b/drivers/net/i40e/i40e_hash.c
@@ -102,47 +102,47 @@ struct i40e_hash_map_rss_inset {
 
 const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
 	/* IPv4 */
-	{ ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
-	{ ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* IPv6 */
-	{ ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
-	{ ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* Port */
-	{ ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+	{ RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
 	/* Ether */
-	{ ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
-	{ ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+	{ RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+	{ RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
 
 	/* VLAN */
-	{ ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
-	{ ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+	{ RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+	{ RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
 };
 
 #define I40E_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
@@ -201,30 +201,30 @@ struct i40e_hash_match_pattern {
 #define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
 	pattern, rss_mask, true, cus_pctype }
 
-#define I40E_HASH_L2_RSS_MASK		(ETH_RSS_VLAN | ETH_RSS_ETH | \
-					ETH_RSS_L2_SRC_ONLY | \
-					ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK		(RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+					RTE_ETH_RSS_L2_SRC_ONLY | \
+					RTE_ETH_RSS_L2_DST_ONLY)
 
 #define I40E_HASH_L23_RSS_MASK		(I40E_HASH_L2_RSS_MASK | \
-					ETH_RSS_L3_SRC_ONLY | \
-					ETH_RSS_L3_DST_ONLY)
+					RTE_ETH_RSS_L3_SRC_ONLY | \
+					RTE_ETH_RSS_L3_DST_ONLY)
 
-#define I40E_HASH_IPV4_L23_RSS_MASK	(ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK	(ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK	(RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK	(RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
 
 #define I40E_HASH_L234_RSS_MASK		(I40E_HASH_L23_RSS_MASK | \
-					ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
-					ETH_RSS_L4_DST_ONLY)
+					RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+					RTE_ETH_RSS_L4_DST_ONLY)
 
-#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
 
-#define I40E_HASH_L4_TYPES		(ETH_RSS_NONFRAG_IPV4_TCP | \
-					ETH_RSS_NONFRAG_IPV4_UDP | \
-					ETH_RSS_NONFRAG_IPV4_SCTP | \
-					ETH_RSS_NONFRAG_IPV6_TCP | \
-					ETH_RSS_NONFRAG_IPV6_UDP | \
-					ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES		(RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* Current supported patterns and RSS types.
  * All items that have the same pattern types are together.
@@ -232,68 +232,68 @@ struct i40e_hash_match_pattern {
 static const struct i40e_hash_match_pattern match_patterns[] = {
 	/* Ether */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
-			      ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+			      RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
 			      I40E_FILTER_PCTYPE_L2_PAYLOAD),
 
 	/* IPv4 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV4),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_NONFRAG_IPV4_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
 			      I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
-			      ETH_RSS_NONFRAG_IPV4_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
-			      ETH_RSS_NONFRAG_IPV4_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_UDP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
-			      ETH_RSS_NONFRAG_IPV4_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
 
 	/* IPv6 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_NONFRAG_IPV6_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
 			      I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
-			      ETH_RSS_NONFRAG_IPV6_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_TCP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
-			      ETH_RSS_NONFRAG_IPV6_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_UDP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
-			      ETH_RSS_NONFRAG_IPV6_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
 
 	/* ESP */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
 
 	/* GTPC */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
@@ -308,27 +308,27 @@ static const struct i40e_hash_match_pattern match_patterns[] = {
 				  I40E_HASH_IPV4_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
 				  I40E_HASH_IPV6_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 
 	/* L2TPV3 */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
 
 	/* AH */
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV4),
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV6),
 };
 
@@ -564,29 +564,29 @@ i40e_hash_get_inset(uint64_t rss_types)
 	/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
 	 * it is the same case as none of them are added.
 	 */
-	mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
-	if (mask == ETH_RSS_L2_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
 		inset &= ~I40E_INSET_DMAC;
-	else if (mask == ETH_RSS_L2_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
 		inset &= ~I40E_INSET_SMAC;
 
-	mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
-	if (mask == ETH_RSS_L3_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
 		inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
-	else if (mask == ETH_RSS_L3_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
 		inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
 
-	mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-	if (mask == ETH_RSS_L4_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
 		inset &= ~I40E_INSET_DST_PORT;
-	else if (mask == ETH_RSS_L4_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
 		inset &= ~I40E_INSET_SRC_PORT;
 
 	if (rss_types & I40E_HASH_L4_TYPES) {
 		uint64_t l3_mask = rss_types &
-				   (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+				   (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 		uint64_t l4_mask = rss_types &
-				   (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+				   (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 		if (l3_mask && !l4_mask)
 			inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
@@ -825,7 +825,7 @@ i40e_hash_config(struct i40e_pf *pf,
 
 	/* Update lookup table */
 	if (rss_info->queue_num > 0) {
-		uint8_t lut[ETH_RSS_RETA_SIZE_512];
+		uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 		uint32_t i, j = 0;
 
 		for (i = 0; i < hw->func_caps.rss_table_size; i++) {
@@ -932,7 +932,7 @@ i40e_hash_parse_queues(const struct rte_eth_dev *dev,
 			    "RSS key is ignored when queues specified");
 
 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		max_queue = i40e_pf_calc_configured_queues_num(pf);
 	else
 		max_queue = pf->dev_data->nb_rx_queues;
@@ -1070,22 +1070,22 @@ i40e_hash_validate_rss_types(uint64_t rss_types)
 	uint64_t type, mask;
 
 	/* Validate L2 */
-	type = ETH_RSS_ETH & rss_types;
-	mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+	type = RTE_ETH_RSS_ETH & rss_types;
+	mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L3 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-	       ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
-	       ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
-	mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+	       RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+	       RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+	mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L4 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
-	mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+	mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
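
These validation rules govern what an application may put in the rte_flow
RSS action's type mask. A minimal sketch requesting source-address-only
hashing for IPv4/TCP, assuming port_id (queue list, key and full error
handling elided):

#include <rte_flow.h>

struct rte_flow_attr attr = { .ingress = 1 };
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
/* L3_SRC_ONLY passes the L3 check above because an L3/L4 type is set. */
struct rte_flow_action_rss rss = {
	.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
};
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
struct rte_flow_error error;
struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
					actions, &error);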
 
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index e2d8b2b5f7f1..ccb3924a5f68 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1207,24 +1207,24 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 	event.event_data.link_event.link_status =
 		dev->data->dev_link.link_status;
 
-	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+	/* need to convert the RTE_ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
 	switch (dev->data->dev_link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	default:
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8329cbdd4e30..3bad4052ed1b 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1329,7 +1329,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	for (i = 0; i < tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		if (k) {
 			for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
 				for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
@@ -2005,7 +2005,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2265,7 +2265,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 	}
 	/* check simple tx conflict */
 	if (ad->tx_simple_allowed) {
-		if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+		if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
 				txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
 			PMD_DRV_LOG(ERR, "No-simple tx is required.");
 			return -EINVAL;
@@ -2925,7 +2925,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 	rxq->max_pkt_len =
 		RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
 			rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -3441,7 +3441,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		 txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
 	ad->tx_vec_allowed = (ad->tx_simple_allowed &&
 			txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 5ccf5773e857..303a4db47dbd 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -120,7 +120,7 @@ struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
-	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
 };
 
 struct i40e_tx_entry {
@@ -165,7 +165,7 @@ struct i40e_tx_queue {
 	bool q_set; /**< indicate if tx queue has been configured */
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
-	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 };
 
 /** Offload features */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index bd21d6422394..5f00d43950aa 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -899,7 +899,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index f52ed98d62d0..0192164c35fa 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -100,7 +100,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	  */
 	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < n; i++) {
 			free[i] = txep[i].mbuf;
 			txep[i].mbuf = NULL;
@@ -211,7 +211,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct i40e_rx_queue *rxq;
 	uint16_t desc, i;
 	bool first_queue;
@@ -221,11 +221,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	 /* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	/* no QinQ support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 
 	/**
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 0481b5538132..6d90b0f3511b 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -42,30 +42,30 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
 		sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -385,19 +385,19 @@ i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
 		return -EINVAL;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			return i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_STRIP)
+		    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			return i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_stripping(vsi, FALSE);
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index b3bd07811198..1d4383e89327 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -48,18 +48,18 @@
 	VIRTCHNL_VF_OFFLOAD_RX_POLLING)
 
 #define IAVF_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |         \
-	ETH_RSS_NONFRAG_IPV4_TCP |  \
-	ETH_RSS_NONFRAG_IPV4_UDP |  \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |         \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 574cfe055e7c..fc0087968b78 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -265,53 +265,53 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	static const uint64_t map_hena_rss[] = {
 		/* IPv4 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
-				ETH_RSS_NONFRAG_IPV4_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
-				ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
 
 		/* IPv6 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
-				ETH_RSS_NONFRAG_IPV6_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
-				ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
 
 		/* L2 Payload */
-		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
 	};
 
-	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
-				  ETH_RSS_NONFRAG_IPV4_TCP |
-				  ETH_RSS_NONFRAG_IPV4_SCTP |
-				  ETH_RSS_NONFRAG_IPV4_OTHER |
-				  ETH_RSS_FRAG_IPV4;
+	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV4;
 
-	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_NONFRAG_IPV6_SCTP |
-				  ETH_RSS_NONFRAG_IPV6_OTHER |
-				  ETH_RSS_FRAG_IPV6;
+	const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV6;
 
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
@@ -330,13 +330,13 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	/**
-	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
 	 * generalizations of all other IPv4 and IPv6 RSS types.
 	 */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		rss_hf |= ipv4_rss;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		rss_hf |= ipv6_rss;
 
 	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
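
A usage note on the generalization above: an application can pass just
the umbrella bit and let the PMD fan it out to the specific types. A
minimal sketch, assuming port_id (passing a NULL key keeps the one
already programmed):

#include <rte_ethdev.h>

struct rte_eth_rss_conf rss_conf = {
	.rss_key = NULL,		/* NULL keeps the programmed key */
	.rss_hf = RTE_ETH_RSS_IPV4,	/* expanded to all IPv4 subtypes here */
};
int ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);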
@@ -362,10 +362,10 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	if (valid_rss_hf & ipv4_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
 
 	if (valid_rss_hf & ipv6_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
 
 	if (rss_hf & ~valid_rss_hf)
 		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
@@ -466,7 +466,7 @@ iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
 		return 0;
 
 	enable = !!(dev->data->dev_conf.txmode.offloads &
-		    DEV_TX_OFFLOAD_VLAN_INSERT);
+		    RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	iavf_config_vlan_insert_v2(adapter, enable);
 
 	return 0;
@@ -478,10 +478,10 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
 	int err;
 
 	err = iavf_dev_vlan_offload_set(dev,
-					ETH_VLAN_STRIP_MASK |
-					ETH_QINQ_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK |
-					ETH_VLAN_EXTEND_MASK);
+					RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_QINQ_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK |
+					RTE_ETH_VLAN_EXTEND_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
 		return err;
@@ -511,8 +511,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_vec_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Large VF setting */
 	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
@@ -585,7 +585,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	/* Check if the jumbo frame and maximum packet length are set
 	 * correctly.
 	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
 		    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -608,7 +608,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -943,35 +943,35 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
@@ -1031,42 +1031,42 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (vf->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -1214,14 +1214,14 @@ iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
 	bool enable;
 	int err;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 
 		iavf_iterate_vlan_filters_v2(dev, enable);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		err = iavf_config_vlan_strip_v2(adapter, enable);
 		/* If not support, the stripping is already disabled by PF */
@@ -1250,9 +1250,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			err = iavf_enable_vlan_strip(adapter);
 		else
 			err = iavf_disable_vlan_strip(adapter);
@@ -1294,8 +1294,8 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(lut, vf->rss_lut, reta_size);
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -1331,8 +1331,8 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = vf->rss_lut[i];
 	}
@@ -1457,10 +1457,10 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > IAVF_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_JUMBO_FRAME;
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -1564,7 +1564,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
 		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
-					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
 					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 2b03dad8589c..1329a389f742 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -341,83 +341,83 @@ struct virtchnl_proto_hdrs ipv4_ecpri_tmplt = {
 /* rss type super set */
 
 /* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4)
+#define IAVF_RSS_TYPE_OUTER_IPV4	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define IAVF_RSS_TYPE_OUTER_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 #define IAVF_RSS_TYPE_OUTER_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 #define IAVF_RSS_TYPE_OUTER_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6	(ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 #define IAVF_RSS_TYPE_OUTER_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 #define IAVF_RSS_TYPE_OUTER_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* VLAN IPV4 */
 #define IAVF_RSS_TYPE_VLAN_IPV4		(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define IAVF_RSS_TYPE_VLAN_IPV6		(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4	ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4	RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6	ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6	RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* GTPU IPv4 */
 #define IAVF_RSS_TYPE_GTPU_IPV4		(IAVF_RSS_TYPE_INNER_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_UDP	(IAVF_RSS_TYPE_INNER_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_TCP	(IAVF_RSS_TYPE_INNER_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define IAVF_RSS_TYPE_GTPU_IPV6		(IAVF_RSS_TYPE_INNER_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_UDP	(IAVF_RSS_TYPE_INNER_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_TCP	(IAVF_RSS_TYPE_INNER_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /**
  * Supported pattern for hash.
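
(Aside, illustrative only: each IAVF_RSS_TYPE_* value above is just an OR of
RTE_ETH_RSS_* bits, so whether a requested rss_hf covers a composite type is a
plain mask test. The helper below is hypothetical, not part of this patch.)

#include <stdbool.h>
#include <stdint.h>

static inline bool
rss_hf_covers(uint64_t rss_hf, uint64_t wanted)
{
	/* true only when every bit of the composite type is requested */
	return (rss_hf & wanted) == wanted;
}

/* e.g. rss_hf_covers(rss_hf, IAVF_RSS_TYPE_OUTER_IPV6_UDP) */
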
@@ -435,7 +435,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv4_udp,		IAVF_RSS_TYPE_VLAN_IPV4_UDP,	&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_tcp,		IAVF_RSS_TYPE_VLAN_IPV4_TCP,	&outer_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_sctp,		IAVF_RSS_TYPE_VLAN_IPV4_SCTP,	&outer_ipv4_sctp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpu,			ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gtpu,			RTE_ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&inner_ipv4_tcp_tmplt},
@@ -477,9 +477,9 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv4_ah,			IAVF_RSS_TYPE_IPV4_AH,		&ipv4_ah_tmplt},
 	{iavf_pattern_eth_ipv4_l2tpv3,			IAVF_RSS_TYPE_IPV4_L2TPV3,	&ipv4_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv4_pfcp,			IAVF_RSS_TYPE_IPV4_PFCP,	&ipv4_pfcp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpc,			ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
-	{iavf_pattern_eth_ecpri,			ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
-	{iavf_pattern_eth_ipv4_ecpri,			ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_gtpc,			RTE_ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ecpri,			RTE_ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_ecpri,			RTE_ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -497,7 +497,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv6_udp,		IAVF_RSS_TYPE_VLAN_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_tcp,		IAVF_RSS_TYPE_VLAN_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_sctp,		IAVF_RSS_TYPE_VLAN_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpu,			ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gtpu,			RTE_ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&inner_ipv6_tcp_tmplt},
@@ -539,7 +539,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_ah,			IAVF_RSS_TYPE_IPV6_AH,		&ipv6_ah_tmplt},
 	{iavf_pattern_eth_ipv6_l2tpv3,			IAVF_RSS_TYPE_IPV6_L2TPV3,	&ipv6_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv6_pfcp,			IAVF_RSS_TYPE_IPV6_PFCP,	&ipv6_pfcp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpc,			ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ipv6_gtpc,			RTE_ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -573,57 +573,57 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 	struct virtchnl_rss_cfg rss_cfg;
 
 #define IAVF_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		rss_cfg.proto_hdrs = inner_ipv4_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		rss_cfg.proto_hdrs = inner_ipv6_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV4) {
 		struct virtchnl_proto_hdrs hdr = {
 			.tunnel_level = TUNNEL_LEVEL_OUTER,
 			.count = 3,
@@ -641,7 +641,7 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV6) {
 		struct virtchnl_proto_hdrs hdr = {
 			.tunnel_level = TUNNEL_LEVEL_OUTER,
 			.count = 3,
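
(Aside: the eight if-blocks above share one shape -- test a single
RTE_ETH_RSS_* bit, copy the matching template into rss_cfg.proto_hdrs, and
push the config. A hedged sketch of the same control flow in table form,
reusing the driver's template and helper names from the hunk; the table and
wrapper function are illustrative only.)

static void
iavf_rss_hash_set_sketch(struct iavf_adapter *ad,
			 struct virtchnl_rss_cfg *rss_cfg,
			 uint64_t rss_hf, bool add)
{
	static const struct {
		uint64_t bit;			/* RTE_ETH_RSS_* flag */
		const struct virtchnl_proto_hdrs *tmplt;
	} map[] = {
		{ RTE_ETH_RSS_IPV4,		&inner_ipv4_tmplt },
		{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,	&inner_ipv4_udp_tmplt },
		{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,	&inner_ipv4_tcp_tmplt },
		/* ... one row per RTE_ETH_RSS_* flag handled above ... */
	};
	unsigned int i;

	for (i = 0; i < RTE_DIM(map); i++) {
		if (rss_hf & map[i].bit) {
			rss_cfg->proto_hdrs = *map[i].tmplt;
			iavf_add_del_rss_cfg(ad, rss_cfg, add);
		}
	}
}
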
@@ -804,28 +804,28 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 		hdr = &proto_hdrs->proto_hdr[i];
 		switch (hdr->type) {
 		case VIRTCHNL_PROTO_HDR_ETH:
-			if (!(rss_type & ETH_RSS_ETH))
+			if (!(rss_type & RTE_ETH_RSS_ETH))
 				hdr->field_selector = 0;
-			else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_DST);
-			else if (rss_type & ETH_RSS_L2_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_SRC);
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4) {
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 					iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
-				} else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				}
@@ -835,11 +835,11 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4)
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
 					REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
 			} else {
 				hdr->field_selector = 0;
@@ -847,17 +847,17 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6:
 			if (rss_type &
-			    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV6_TCP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			    (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				}
@@ -874,7 +874,7 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
 			else
 				hdr->field_selector = 0;
@@ -882,15 +882,15 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_UDP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
@@ -898,15 +898,15 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_TCP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
@@ -914,46 +914,46 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_SCTP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_SCTP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_S_VLAN:
-			if (!(rss_type & ETH_RSS_S_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_S_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_C_VLAN:
-			if (!(rss_type & ETH_RSS_C_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_C_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_L2TPV3:
-			if (!(rss_type & ETH_RSS_L2TPV3))
+			if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ESP:
-			if (!(rss_type & ETH_RSS_ESP))
+			if (!(rss_type & RTE_ETH_RSS_ESP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_AH:
-			if (!(rss_type & ETH_RSS_AH))
+			if (!(rss_type & RTE_ETH_RSS_AH))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_PFCP:
-			if (!(rss_type & ETH_RSS_PFCP))
+			if (!(rss_type & RTE_ETH_RSS_PFCP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ECPRI:
-			if (!(rss_type & ETH_RSS_ECPRI))
+			if (!(rss_type & RTE_ETH_RSS_ECPRI))
 				hdr->field_selector = 0;
 			break;
 		default:
@@ -970,7 +970,7 @@ iavf_refine_proto_hdrs_gtpu(struct virtchnl_proto_hdrs *proto_hdrs,
 	struct virtchnl_proto_hdr *hdr;
 	int i;
 
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	for (i = 0; i < proto_hdrs->count; i++) {
@@ -1067,10 +1067,10 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -1081,27 +1081,27 @@ struct rss_attr_type {
 	uint64_t type;
 };
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE64)
 
 #define INVALID_RSS_ATTR	(RTE_ETH_RSS_L3_PRE32	| \
@@ -1111,9 +1111,9 @@ struct rss_attr_type {
 				 RTE_ETH_RSS_L3_PRE96)
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* current ipv6 prefix only supports prefix 64 bits*/
 	{RTE_ETH_RSS_L3_PRE64,				VALID_RSS_IPV6},
 	{INVALID_RSS_ATTR,				0}
@@ -1130,15 +1130,15 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
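
(Aside: the symmetric-Toeplitz hunk above encodes two rules -- SRC_ONLY or
DST_ONLY selectors are rejected because a symmetric hash must see both halves
of the tuple, and at least one supported L3/L4 type must be requested. A
standalone restatement, with a hypothetical helper name:)

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

static bool
symm_toeplitz_rss_ok(uint64_t rss_type)
{
	/* src-only or dst-only selectors would break hash symmetry */
	if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
			RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
		return false;

	/* at least one supported L3/L4 type must be present */
	return (rss_type & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
			    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			    RTE_ETH_RSS_NONFRAG_IPV6_UDP |
			    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
			    RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
			    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) != 0;
}
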
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index e33fe4576b6e..4ff856fc82aa 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -609,7 +609,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->vsi = vsi;
 	rxq->offloads = offloads;
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
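
(Aside: the crc_len choice above is driven by an offload the application
requests at configure time. A minimal application-side sketch, assuming a
single Rx/Tx queue; port_id and queue counts are placeholders:)

#include <rte_ethdev.h>

static int
configure_keep_crc(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			/* keep the 4-byte Ethernet FCS on received mbufs */
			.offloads = RTE_ETH_RX_OFFLOAD_KEEP_CRC,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
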
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d633..096be81e8a69 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -24,22 +24,22 @@
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_QINQ_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		DEV_RX_OFFLOAD_CHECKSUM |		 \
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_RX_OFFLOAD_VLAN |		 \
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_VLAN |		 \
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
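
(Aside: these masks feed the datapath selection -- any offload without a
vector implementation forces the scalar routines. A simplified sketch of that
decision; the real selection in iavf_rxtx.c considers more state, and the
function name is hypothetical:)

static int
iavf_select_tx_path_sketch(uint64_t offloads)
{
	/* TSO and multi-segment Tx have no vector implementation */
	if (offloads & IAVF_TX_NO_VECTOR_FLAGS)
		return -1;			/* fall back to scalar Tx */
	/* offloads the vector code can handle pick the offload flavour */
	if (offloads & IAVF_TX_VECTOR_OFFLOAD)
		return IAVF_VECTOR_OFFLOAD_PATH;
	return IAVF_VECTOR_PATH;
}
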
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 475070e036ef..8f9a397e4143 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -904,7 +904,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH ||
+				RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
@@ -957,7 +957,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					raw_desc_bh1, 1);
 
 			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 571161c0cdec..2329928c62cb 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1138,7 +1138,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-			    DEV_RX_OFFLOAD_RSS_HASH ||
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1191,7 +1191,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 						 raw_desc_bh1, 1);
 
 				if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-						DEV_RX_OFFLOAD_RSS_HASH) {
+						RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 					/**
 					 * to shift the 32b RSS hash value to the
 					 * highest 32b of each 128b before mask
@@ -1719,7 +1719,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
 								rte_lcore_id());
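
(Aside: the fast-free branch above is only legal because
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE is a contract from the application -- every
transmitted mbuf is direct, non-segmented, has refcnt 1, and comes from one
mempool -- so a whole batch can be returned with one bulk put. A simplified
sketch of that shape; the iavf_tx_entry array type is assumed from the hunk:)

static void
fast_free_tx_bufs(struct iavf_tx_entry *txep, uint32_t n)
{
	/* by contract, the pool of the first mbuf is the pool of all */
	struct rte_mempool *mp = txep[0].mbuf->pool;
	void *free_bufs[n];
	uint32_t i;

	for (i = 0; i < n; i++)
		free_bufs[i] = txep[i].mbuf;
	rte_mempool_put_bulk(mp, free_bufs, n);
}
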
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e9055259b..58f928bdd7ca 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -818,7 +818,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 4c2e0c7216fd..ec53478083b4 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -807,7 +807,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
 		/* set all lut items to default queue */
 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
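
(Aside: RTE_ETH_MQ_RX_RSS_FLAG is a bit carried by the RSS-capable
RTE_ETH_MQ_RX_* enum values, so checks like the one above reduce to a single
mask. A minimal sketch with a hypothetical helper:)

#include <stdbool.h>
#include <rte_ethdev.h>

static bool
rx_mode_uses_rss(const struct rte_eth_conf *conf)
{
	/* set for RTE_ETH_MQ_RX_RSS, RTE_ETH_MQ_RX_VMDQ_RSS, ... */
	return (conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0;
}
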
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index cab7c4da8759..6226aa5a80c2 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -66,7 +66,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	/* Check if the jumbo frame and maximum packet length are set
 	 * correctly.
 	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (max_pkt_len <= ICE_ETH_MAX_LEN ||
 		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -89,7 +89,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -559,7 +559,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -620,7 +620,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 
 	return 0;
@@ -635,8 +635,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	return 0;
 }
@@ -658,28 +658,28 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -896,42 +896,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (hw->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = hw->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -950,11 +950,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
 					udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
 					udp_tunnel->udp_port);
 		break;
@@ -981,8 +981,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
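
(Aside: on the application side, the RTE_ETH_SPEED_NUM_* and RTE_ETH_LINK_*
values produced by link_update land in struct rte_eth_link. A minimal sketch
of reading them back; rte_eth_link_speed_to_str() is the generic ethdev
pretty-printer:)

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	printf("port %u: %s, %s\n", port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
	       rte_eth_link_speed_to_str(link.link_speed));
}
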
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 970461f3e90a..0dac1b92bfdb 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -37,7 +37,7 @@ ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -45,7 +45,7 @@ ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -135,29 +135,29 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -239,9 +239,9 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		bool enable = !!(dev_conf->rxmode.offloads &
-				 DEV_RX_OFFLOAD_VLAN_STRIP);
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (enable && repr->outer_vlan_info.port_vlan_ena) {
 			PMD_DRV_LOG(ERR,
@@ -338,7 +338,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 	if (!ice_dcf_vlan_offload_ena(repr))
 		return -ENOTSUP;
 
-	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer VLAN in QinQ\n");
 		return -EINVAL;
@@ -368,7 +368,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 
 	if (repr->outer_vlan_info.stripping_ena) {
 		err = ice_dcf_vf_repr_vlan_offload_set(dev,
-						       ETH_VLAN_STRIP_MASK);
+						       RTE_ETH_VLAN_STRIP_MASK);
 		if (err) {
 			PMD_DRV_LOG(ERR,
 				    "Failed to reset VLAN stripping : %d\n",
@@ -441,7 +441,7 @@ ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
 	int err;
 
 	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
-					       ETH_VLAN_STRIP_MASK);
+					       RTE_ETH_VLAN_STRIP_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
 		return err;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a4cd39c954f1..459718ad33f6 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1449,9 +1449,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	TAILQ_INIT(&vsi->mac_list);
 	TAILQ_INIT(&vsi->vlan_list);
 
-	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+	/* Keep in sync with the RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
 			hw->func_caps.common_cap.rss_table_size;
 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -2809,16 +2809,16 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	int ret;
 
 #define ICE_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_FRAG_IPV6)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV6)
 
 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
 	if (ret)
@@ -2828,7 +2828,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	cfg.symm = 0;
 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 	/* Configure RSS for IPv4 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2838,7 +2838,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for IPv6 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2848,7 +2848,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -2859,7 +2859,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -2870,7 +2870,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -2881,7 +2881,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -2892,7 +2892,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -2903,7 +2903,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -2913,7 +2913,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -2923,7 +2923,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -2933,7 +2933,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -2943,7 +2943,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -2953,7 +2953,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -2963,7 +2963,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -2973,7 +2973,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_FRAG;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -2982,7 +2982,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_FRAG_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_FRAG_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_FRAG;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3124,8 +3124,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_rx_queues) {
 		ret = ice_init_rss(pf);
@@ -3344,8 +3344,8 @@ ice_dev_start(struct rte_eth_dev *dev)
 	ice_set_rx_function(dev);
 	ice_set_tx_function(dev);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = ice_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3449,40 +3449,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->flow_type_rss_offloads = 0;
 
 	if (!is_safe_mode) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM |
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_QINQ_STRIP |
-			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_QINQ_INSERT |
-			DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM |
-			DEV_TX_OFFLOAD_SCTP_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
 	dev_info->rx_queue_offload_capa = 0;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3521,24 +3521,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_align = ICE_ALIGN_RING_DESC,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			       ETH_LINK_SPEED_100M |
-			       ETH_LINK_SPEED_1G |
-			       ETH_LINK_SPEED_2_5G |
-			       ETH_LINK_SPEED_5G |
-			       ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_20G |
-			       ETH_LINK_SPEED_25G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			       RTE_ETH_LINK_SPEED_100M |
+			       RTE_ETH_LINK_SPEED_1G |
+			       RTE_ETH_LINK_SPEED_2_5G |
+			       RTE_ETH_LINK_SPEED_5G |
+			       RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_20G |
+			       RTE_ETH_LINK_SPEED_25G;
 
 	phy_type_low = hw->port_info->phy.phy_type_low;
 	phy_type_high = hw->port_info->phy.phy_type_high;
 
 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3603,8 +3603,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
 					      &link_status, NULL);
 		if (status != ICE_SUCCESS) {
-			link.link_speed = ETH_SPEED_NUM_100M;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_100M;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			goto out;
 		}
@@ -3620,55 +3620,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		goto out;
 
 	/* Full-duplex operation at all supported speeds */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case ICE_AQ_LINK_SPEED_10MB:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case ICE_AQ_LINK_SPEED_100MB:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case ICE_AQ_LINK_SPEED_1000MB:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case ICE_AQ_LINK_SPEED_2500MB:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_5GB:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_10GB:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case ICE_AQ_LINK_SPEED_20GB:
-		link.link_speed = ETH_SPEED_NUM_20G;
+		link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case ICE_AQ_LINK_SPEED_25GB:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case ICE_AQ_LINK_SPEED_40GB:
-		link.link_speed = ETH_SPEED_NUM_40G;
+		link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case ICE_AQ_LINK_SPEED_50GB:
-		link.link_speed = ETH_SPEED_NUM_50G;
+		link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case ICE_AQ_LINK_SPEED_100GB:
-		link.link_speed = ETH_SPEED_NUM_100G;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case ICE_AQ_LINK_SPEED_UNKNOWN:
 		PMD_DRV_LOG(ERR, "Unknown link speed");
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "None link speed");
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 
 out:
 	ice_atomic_write_link_status(dev, &link);
@@ -3767,10 +3767,10 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	if (frame_size > ICE_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -4161,15 +4161,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ice_vsi_config_vlan_filter(vsi, true);
 		else
 			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ice_vsi_config_vlan_stripping(vsi, true);
 		else
 			ice_vsi_config_vlan_stripping(vsi, false);
@@ -4284,8 +4284,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4334,8 +4334,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -5244,7 +5244,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
 		break;
 	default:
@@ -5268,7 +5268,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
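
(Aside: the idx/shift arithmetic in the RETA hunks above is the standard
addressing of struct rte_eth_rss_reta_entry64 -- logical entry i lives in
group i / RTE_ETH_RETA_GROUP_SIZE at bit i % RTE_ETH_RETA_GROUP_SIZE. A hedged
application-side sketch that fills a table round-robin over the queues:)

#include <string.h>
#include <rte_ethdev.h>

static int
fill_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= 1ULL << shift;	/* entry is valid */
		reta[idx].reta[shift] = i % nb_queues;	/* round-robin */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
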
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index b4bf651c1c7f..1c4bc4e30349 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -115,19 +115,19 @@
 		       ICE_FLAG_VF_MAC_BY_PF)
 
 #define ICE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /**
  * The overhead from MTU to max frame size.
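
(Aside: ICE_RSS_OFFLOAD_ALL above is what the PMD advertises through
dev_info.flow_type_rss_offloads; a portable application clamps its request to
that set before configuring. A minimal sketch:)

#include <rte_ethdev.h>

static uint64_t
supported_rss_hf(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	/* drop any bits the PMD does not advertise */
	return wanted & dev_info.flow_type_rss_offloads;
}
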
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 54d14dfcddfb..beb863f70568 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -39,27 +39,27 @@
 #define ICE_IPV4_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
 #define ICE_IPV6_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE32	| \
 				 RTE_ETH_RSS_L3_PRE48	| \
 				 RTE_ETH_RSS_L3_PRE64)
@@ -373,80 +373,80 @@ struct ice_rss_hash_cfg eth_tmplt = {
 };
 
 /* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4		(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4)
+#define ICE_RSS_TYPE_ETH_IPV4		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_ETH_IPV4_UDP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 #define ICE_RSS_TYPE_ETH_IPV4_TCP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 #define ICE_RSS_TYPE_ETH_IPV4_SCTP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
-#define ICE_RSS_TYPE_IPV4		ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
+#define ICE_RSS_TYPE_IPV4		RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 /* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6		(ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(ETH_RSS_ETH | ETH_RSS_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_ETH_IPV6_UDP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 #define ICE_RSS_TYPE_ETH_IPV6_TCP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 #define ICE_RSS_TYPE_ETH_IPV6_SCTP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
-#define ICE_RSS_TYPE_IPV6		ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ICE_RSS_TYPE_IPV6		RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* VLAN IPV4 */
 #define ICE_RSS_TYPE_VLAN_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV4)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_VLAN_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_SCTP	(ICE_RSS_TYPE_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define ICE_RSS_TYPE_VLAN_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_FRAG	(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_VLAN_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_SCTP	(ICE_RSS_TYPE_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 
 /* GTPU IPv4 */
 #define ICE_RSS_TYPE_GTPU_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define ICE_RSS_TYPE_GTPU_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 
 /* PPPOE */
-#define ICE_RSS_TYPE_PPPOE		(ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
 
 /* PPPOE IPv4 */
 #define ICE_RSS_TYPE_PPPOE_IPV4		(ICE_RSS_TYPE_IPV4 | \
@@ -465,17 +465,17 @@ struct ice_rss_hash_cfg eth_tmplt = {
 					 ICE_RSS_TYPE_PPPOE)
 
 /* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /* MAC */
-#define ICE_RSS_TYPE_ETH		ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH		RTE_ETH_RSS_ETH
 
 /**
  * Supported pattern for hash.
@@ -640,51 +640,51 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
-		if (!(rss_type & ETH_RSS_ETH))
+		if (!(rss_type & RTE_ETH_RSS_ETH))
 			*hash_flds &= ~ICE_FLOW_HASH_ETH;
-		if (rss_type & ETH_RSS_L2_SRC_ONLY)
+		if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
-		else if (rss_type & ETH_RSS_L2_DST_ONLY)
+		else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
 		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
-		if (rss_type & ETH_RSS_ETH)
+		if (rss_type & RTE_ETH_RSS_ETH)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
-		if (rss_type & ETH_RSS_C_VLAN)
+		if (rss_type & RTE_ETH_RSS_C_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
-		else if (rss_type & ETH_RSS_S_VLAN)
+		else if (rss_type & RTE_ETH_RSS_S_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
-		if (!(rss_type & ETH_RSS_PPPOE))
+		if (!(rss_type & RTE_ETH_RSS_PPPOE))
 			*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 		if (rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		    ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV4) {
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 				*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
 				*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 			}
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV4;
@@ -693,30 +693,30 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 		if (rss_type &
-		   (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+		   (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		}
 
 		if (rss_type & RTE_ETH_RSS_L3_PRE32) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
 			} else {
@@ -725,10 +725,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE48) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
 			} else {
@@ -737,10 +737,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE64) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
 			} else {
@@ -752,15 +752,15 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV6_UDP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
@@ -769,15 +769,15 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV6_TCP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
@@ -786,15 +786,15 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_SCTP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
@@ -802,22 +802,22 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
-		if (!(rss_type & ETH_RSS_L2TPV3))
+		if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 			*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
-		if (!(rss_type & ETH_RSS_ESP))
+		if (!(rss_type & RTE_ETH_RSS_ESP))
 			*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
-		if (!(rss_type & ETH_RSS_AH))
+		if (!(rss_type & RTE_ETH_RSS_AH))
 			*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
-		if (!(rss_type & ETH_RSS_PFCP))
+		if (!(rss_type & RTE_ETH_RSS_PFCP))
 			*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
 	}
 }
@@ -851,7 +851,7 @@ ice_refine_hash_cfg_gtpu(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
@@ -873,10 +873,10 @@ static void ice_refine_hash_cfg(struct ice_rss_hash_cfg *hash_cfg,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -888,9 +888,9 @@ struct rss_attr_type {
 };
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* only IPv6 prefixes of up to 64 bits are currently supported */
 	{RTE_ETH_RSS_L3_PRE32,				VALID_RSS_IPV6},
 	{RTE_ETH_RSS_L3_PRE48,				VALID_RSS_IPV6},
@@ -909,16 +909,16 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
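For reference, a minimal application-side sketch of the constraint checked
above: symmetric Toeplitz rejects the L3/L4 SRC/DST_ONLY attributes and
requires an IP hash type. Illustrative only, not part of this patch; the
port and queue arguments are placeholders:

#include <rte_flow.h>

static struct rte_flow *
request_symmetric_rss(uint16_t port_id, const uint16_t *queues,
		      uint16_t nb_queues, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
		/* must carry an IP type; SRC/DST_ONLY would be rejected */
		.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
		.queue_num = nb_queues,
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}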
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f047ee..63c07e001f07 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -280,7 +280,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 				   ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
 				   dev_data->dev_conf.rxmode.max_rx_pkt_len);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
 		    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -1103,7 +1103,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2780,7 +2780,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
 	for (i = 0; i < txq->tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
 			txep->mbuf = NULL;
@@ -3254,7 +3254,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		(txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
 
 	if (ad->tx_simple_allowed)
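
A usage sketch for the fast-free path gated above (illustrative;
port_id/queue_id/nb_desc are placeholders): request only
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE at queue setup. Fast free also assumes
all mbufs on the queue come from one mempool with reference count 1:

#include <rte_ethdev.h>

static int
setup_fast_free_txq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	txconf = dev_info.default_txconf;
	/* enable per-queue only if the PMD advertises it per-queue */
	if (dev_info.tx_queue_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txconf.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
			rte_eth_dev_socket_id(port_id), &txconf);
}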
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 9725ac018043..8c870354619e 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -473,7 +473,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 5bba9887d296..6d2038975830 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -584,7 +584,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -994,7 +994,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 2d8ef7dc8a93..a5b573c22da2 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -248,23 +248,23 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 }
 
 #define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define ICE_RX_VECTOR_OFFLOAD (				\
-		DEV_RX_OFFLOAD_CHECKSUM |		\
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_RX_OFFLOAD_VLAN |			\
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_RX_OFFLOAD_VLAN |			\
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define ICE_VECTOR_PATH		0
 #define ICE_VECTOR_OFFLOAD_PATH	1
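
The masks above feed a path-selection step that can be sketched as below
(simplified; not the driver's actual function name or full logic):

/* reuses ICE_TX_NO_VECTOR_FLAGS etc. from the header above */
static int
pick_tx_path(uint64_t offloads)
{
	if (offloads & ICE_TX_NO_VECTOR_FLAGS)
		return -1;			/* scalar path only */
	if (offloads & ICE_TX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;
	return ICE_VECTOR_PATH;
}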
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 653bd28b417c..117494131f32 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -479,7 +479,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
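
The RSS_HASH gating repeated in the three vector Rx paths above is what
makes the per-packet hash valid. A minimal consumer sketch
(PKT_RX_RSS_HASH is the mbuf flag name at the time of this patch):

#include <rte_mbuf.h>

static inline uint32_t
pkt_rss_hash(const struct rte_mbuf *m)
{
	/* hash.rss is only meaningful when the flag is set */
	return (m->ol_flags & PKT_RX_RSS_HASH) ? m->hash.rss : 0;
}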
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 224a0954836b..c75b06cae1fe 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -314,8 +314,8 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		rx_mq_mode != ETH_MQ_RX_RSS) {
+	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 		/* RSS together with VMDq not supported*/
 		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				rx_mq_mode);
@@ -325,7 +325,7 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 	/* To avoid breaking software that sets an invalid mode, only display
 	 * a warning if an invalid mode is used.
 	 */
-	if (tx_mq_mode != ETH_MQ_TX_NONE)
+	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
 		PMD_INIT_LOG(WARNING,
 			"TX mode %d is not supported; it is meaningless for this driver, so it is ignored",
 			tx_mq_mode);
@@ -341,8 +341,8 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	ret  = igc_check_mq_mode(dev);
 	if (ret != 0)
@@ -480,12 +480,12 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 		if (speed == SPEED_2500) {
 			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
@@ -497,9 +497,9 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		}
 	} else {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -532,7 +532,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
 				" Port %d: Link Up - speed %u Mbps - %s",
 				dev->data->port_id,
 				(unsigned int)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				"full-duplex" : "half-duplex");
 		else
 			PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -979,18 +979,18 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	/* VLAN Offload Settings */
 	eth_igc_vlan_offload_set(dev,
-		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+		RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
 		hw->mac.autoneg = 1;
 	} else {
 		int num_speeds = 0;
 
-		if (*speeds & ETH_LINK_SPEED_FIXED) {
+		if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
 			PMD_DRV_LOG(ERR,
 				    "Force speed mode currently not supported");
 			igc_dev_clear_queues(dev);
@@ -1000,33 +1000,33 @@ eth_igc_start(struct rte_eth_dev *dev)
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.autoneg = 1;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_2_5G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
 			num_speeds++;
 		}
@@ -1490,14 +1490,14 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
-	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_vmdq_pools = 0;
 
 	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1523,9 +1523,9 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -1603,11 +1603,11 @@ eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (mtu > RTE_ETHER_MTU) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= IGC_RCTL_LPE;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~IGC_RCTL_LPE;
 	}
 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
@@ -2165,13 +2165,13 @@ eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2203,16 +2203,16 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->fc.requested_mode = igc_fc_none;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->fc.requested_mode = igc_fc_rx_pause;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->fc.requested_mode = igc_fc_tx_pause;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->fc.requested_mode = igc_fc_full;
 		break;
 	default:
@@ -2258,29 +2258,29 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of the configured RSS redirection table (%d) does not match the number supported by the hardware (%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* set redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta, reg;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to update the register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* check the mask to decide whether the register value must be read first */
@@ -2314,29 +2314,29 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of the configured RSS redirection table (%d) does not match the number supported by the hardware (%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* read redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to read register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* read register and get the queue index */
@@ -2393,23 +2393,23 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_hf = 0;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 
 	rss_conf->rss_hf |= rss_hf;
 	return 0;
@@ -2495,7 +2495,7 @@ igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 		return 0;
 
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0)
 		goto write_ext_vlan;
 
 	/* Update maximum packet length */
@@ -2528,7 +2528,7 @@ igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 		return 0;
 
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) == 0)
 		goto write_ext_vlan;
 
 	/* Update maximum packet length */
@@ -2554,22 +2554,22 @@ eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igc_vlan_hw_strip_enable(dev);
 		else
 			igc_vlan_hw_strip_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igc_vlan_hw_filter_enable(dev);
 		else
 			igc_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			return igc_vlan_hw_extend_enable(dev);
 		else
 			return igc_vlan_hw_extend_disable(dev);
@@ -2587,7 +2587,7 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 	uint32_t reg_val;
 
 	/* only the outer TPID of a double VLAN can be configured */
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg_val = IGC_READ_REG(hw, IGC_VET);
 		reg_val = (reg_val & (~IGC_VET_EXT)) |
 			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
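
An application-side sketch of the link handling above (port_id is a
placeholder; queue setup and rte_eth_dev_start() are omitted for
brevity): advertise a subset of speeds via autoneg, since the driver
rejects RTE_ETH_LINK_SPEED_FIXED, then read the negotiated link back:

#include <stdio.h>
#include <rte_ethdev.h>

static void
report_link(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.link_speeds = RTE_ETH_LINK_SPEED_100M |
			       RTE_ETH_LINK_SPEED_1G |
			       RTE_ETH_LINK_SPEED_2_5G,
	};
	struct rte_eth_link link;

	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
		return;
	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	printf("port %u: %s, %u Mbps, %s\n", port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
	       link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			"full-duplex" : "half-duplex");
}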
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index 7b6c209df3b6..066792b8a2d8 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -59,38 +59,38 @@ extern "C" {
 #define IGC_TX_MAX_MTU_SEG	UINT8_MAX
 
 #define IGC_RX_OFFLOAD_ALL	(    \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_VLAN_FILTER | \
-	DEV_RX_OFFLOAD_VLAN_EXTEND | \
-	DEV_RX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_RX_OFFLOAD_UDP_CKSUM   | \
-	DEV_RX_OFFLOAD_TCP_CKSUM   | \
-	DEV_RX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_RX_OFFLOAD_JUMBO_FRAME | \
-	DEV_RX_OFFLOAD_KEEP_CRC    | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
-	DEV_TX_OFFLOAD_VLAN_INSERT | \
-	DEV_TX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_TX_OFFLOAD_UDP_CKSUM   | \
-	DEV_TX_OFFLOAD_TCP_CKSUM   | \
-	DEV_TX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_TX_OFFLOAD_TCP_TSO     | \
-	DEV_TX_OFFLOAD_UDP_TSO	   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO     | \
+	RTE_ETH_TX_OFFLOAD_UDP_TSO	   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define IGC_RSS_OFFLOAD_ALL	(    \
-	ETH_RSS_IPV4               | \
-	ETH_RSS_NONFRAG_IPV4_TCP   | \
-	ETH_RSS_NONFRAG_IPV4_UDP   | \
-	ETH_RSS_IPV6               | \
-	ETH_RSS_NONFRAG_IPV6_TCP   | \
-	ETH_RSS_NONFRAG_IPV6_UDP   | \
-	ETH_RSS_IPV6_EX            | \
-	ETH_RSS_IPV6_TCP_EX        | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4               | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP   | \
+	RTE_ETH_RSS_IPV6               | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP   | \
+	RTE_ETH_RSS_IPV6_EX            | \
+	RTE_ETH_RSS_IPV6_TCP_EX        | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IGC_MAX_ETQF_FILTERS		3	/* etqf(3) is used for 1588 */
 #define IGC_ETQF_FILTER_1588		3
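
A small sketch of how an application keeps a request such as
IGC_RSS_OFFLOAD_ALL within what the port reports (illustrative helper,
not part of the patch):

#include <rte_ethdev.h>

static uint64_t
clamp_rss_hf(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	/* drop hash types the PMD does not advertise */
	return requested & dev_info.flow_type_rss_offloads;
}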
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index b5489eedd220..82e7e084b41d 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -127,7 +127,7 @@ struct igc_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /** Offload features */
@@ -209,7 +209,7 @@ struct igc_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 static inline uint64_t
@@ -866,23 +866,23 @@ igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
 }
@@ -1056,10 +1056,10 @@ igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		igc_rss_configure(dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/*
 		 * configure the RSS registers as below,
 		 * then disable the RSS logic
@@ -1099,7 +1099,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
 
 	/* Configure support of jumbo frames, if any. */
-	if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		rctl |= IGC_RCTL_LPE;
 
 		/*
@@ -1130,7 +1130,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+		rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				RTE_ETHER_CRC_LEN : 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -1196,7 +1196,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	if (dev->data->scattered_rx) {
@@ -1240,20 +1240,20 @@ igc_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= IGC_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= IGC_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_IPOFL;
 
 	if (offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rxcsum |= IGC_RXCSUM_TUOFL;
-		offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
 	} else {
 		rxcsum &= ~IGC_RXCSUM_TUOFL;
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
 		rxcsum |= IGC_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_CRCOFL;
@@ -1261,7 +1261,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1298,12 +1298,12 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
 		dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			dvmolr |= IGC_DVMOLR_STRVLAN;
 		else
 			dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-		if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			dvmolr &= ~IGC_DVMOLR_STRCRC;
 		else
 			dvmolr |= IGC_DVMOLR_STRCRC;
@@ -2272,10 +2272,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 	reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
 	if (on) {
 		reg_val |= IGC_DVMOLR_STRVLAN;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
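
The queue-level strip toggle above is reached through the generic ethdev
API; a one-call sketch (port and queue are placeholders):

#include <rte_ethdev.h>

static int
enable_queue_vlan_strip(uint16_t port_id, uint16_t rx_queue_id)
{
	/* flips RTE_ETH_RX_OFFLOAD_VLAN_STRIP in the queue's offloads */
	return rte_eth_dev_set_vlan_strip_on_queue(port_id, rx_queue_id, 1);
}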
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index e6207939665e..5e7c22c339d1 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -280,37 +280,37 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(link));
 
 	if (adapter->idev.port_info->config.an_enable) {
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	}
 
 	if (!adapter->link_up ||
 	    !(lif->state & IONIC_LIF_F_UP)) {
 		/* Interface is down */
-		link.link_status = ETH_LINK_DOWN;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else {
 		/* Interface is up */
-		link.link_status = ETH_LINK_UP;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		switch (adapter->link_speed) {
 		case  10000:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case  25000:
-			link.link_speed = ETH_SPEED_NUM_25G;
+			link.link_speed = RTE_ETH_SPEED_NUM_25G;
 			break;
 		case  40000:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case  50000:
-			link.link_speed = ETH_SPEED_NUM_50G;
+			link.link_speed = RTE_ETH_SPEED_NUM_50G;
 			break;
 		case 100000:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -397,17 +397,17 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
 
 	dev_info->speed_capa =
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	/*
 	 * Per-queue capabilities
 	 * RTE does not support disabling a feature on a queue if it is
 	 * enabled globally on the device. Thus the driver does not advertise
-	 * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+	 * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
 	 * though the driver would be otherwise capable of disabling it on
 	 * a per-queue basis.
 	 */
@@ -421,25 +421,25 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	 */
 
 	dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
 		0;
 
 	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
 		0;
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -474,9 +474,9 @@ ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		fc_conf->autoneg = 0;
 
 		if (idev->port_info->config.pause_type)
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -498,14 +498,14 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
 		break;
-	case RTE_FC_RX_PAUSE:
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		return -ENOTSUP;
 	}
 
@@ -556,12 +556,12 @@ ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = tbl_sz / RTE_RETA_GROUP_SIZE;
+	num = tbl_sz / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if (reta_conf[i].mask & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -596,12 +596,12 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-			&lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
-			RTE_RETA_GROUP_SIZE);
+			&lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+			RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -629,17 +629,17 @@ ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			IONIC_RSS_HASH_KEY_SIZE);
 
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -671,17 +671,17 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (!lif->rss_ind_tbl)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 			rss_types |= IONIC_RSS_TYPE_IPV4;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
-		if (rss_conf->rss_hf & ETH_RSS_IPV6)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 			rss_types |= IONIC_RSS_TYPE_IPV6;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
 
 		ionic_lif_rss_config(lif, rss_types, key, NULL);
@@ -853,15 +853,15 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
 static inline uint32_t
 ionic_parse_link_speeds(uint16_t link_speeds)
 {
-	if (link_speeds & ETH_LINK_SPEED_100G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 		return 100000;
-	else if (link_speeds & ETH_LINK_SPEED_50G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 		return 50000;
-	else if (link_speeds & ETH_LINK_SPEED_40G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		return 40000;
-	else if (link_speeds & ETH_LINK_SPEED_25G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		return 25000;
-	else if (link_speeds & ETH_LINK_SPEED_10G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		return 10000;
 	else
 		return 0;
@@ -885,12 +885,12 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	IONIC_PRINT_CALL();
 
 	allowed_speeds =
-		ETH_LINK_SPEED_FIXED |
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_FIXED |
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	if (dev_conf->link_speeds & ~allowed_speeds) {
 		IONIC_PRINT(ERR, "Invalid link setting");
@@ -907,7 +907,7 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure link */
-	an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+	an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 	ionic_dev_cmd_port_autoneg(idev, an_enable);
 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
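
A sketch of the RETA update served by the driver code above, using the
namespaced group size (assumes reta_size comes from dev_info.reta_size
and is a multiple of RTE_ETH_RETA_GROUP_SIZE):

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
						  RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;	/* round-robin */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}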
diff --git a/drivers/net/ionic/ionic_ethdev.h b/drivers/net/ionic/ionic_ethdev.h
index 6cbcd0f825a3..652f28c97d57 100644
--- a/drivers/net/ionic/ionic_ethdev.h
+++ b/drivers/net/ionic/ionic_ethdev.h
@@ -8,12 +8,12 @@
 #include <rte_ethdev.h>
 
 #define IONIC_ETH_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
 	(eth_dev)->data->dev_private)
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index 431eda777b78..d4eb6c1d78be 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -1688,12 +1688,12 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
 
 	/*
 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
-	 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
 	 */
-	rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
 		else
 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
@@ -1733,19 +1733,19 @@ ionic_lif_configure(struct ionic_lif *lif)
 	/*
 	 * NB: While it is true that RSS_HASH is always enabled on ionic,
 	 *     setting this flag unconditionally causes problems in DTS.
-	 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	 */
 
 	/* RX per-port */
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 		lif->features |= IONIC_ETH_HW_RX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_RX_CSUM;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		lif->features |= IONIC_ETH_HW_RX_SG;
 		lif->eth_dev->data->scattered_rx = 1;
 	} else {
@@ -1754,30 +1754,30 @@ ionic_lif_configure(struct ionic_lif *lif)
 	}
 
 	/* Covers VLAN_STRIP */
-	ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
 
 	/* TX per-port */
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		lif->features |= IONIC_ETH_HW_TX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_CSUM;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
 	else
 		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		lif->features |= IONIC_ETH_HW_TX_SG;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_SG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		lif->features |= IONIC_ETH_HW_TSO;
 		lif->features |= IONIC_ETH_HW_TSO_IPV6;
 		lif->features |= IONIC_ETH_HW_TSO_ECN;
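
The mask handling above is driven by the generic VLAN offload API; a
sketch (RTE_ETH_VLAN_STRIP_OFFLOAD is the namespaced flag introduced by
this series):

#include <rte_ethdev.h>

static int
enable_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;
	return rte_eth_dev_set_vlan_offload(port_id,
			mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
}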
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index b83ea1bcaa6a..0c1f6113d0e9 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -204,11 +204,11 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 		txq->flags |= IONIC_QCQ_F_DEFERRED;
 
 	/* Convert the offload flags into queue flags */
-	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
-	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
-	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
 
 	eth_dev->data->tx_queues[tx_queue_id] = txq;
@@ -745,11 +745,11 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/*
 	 * Note: the interface does not currently support
-	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
 	 * when the adapter becomes able to keep the CRC and subtract
 	 * it from the length of all received packets:
 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
-	 *     DEV_RX_OFFLOAD_KEEP_CRC)
+	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 	 *   rxq->crc_len = ETHER_CRC_LEN;
 	 */
 
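A per-queue Rx setup sketch matching the flag-to-queue conversion above
(mb_pool and sizes are placeholders): request an offload per-queue only
when the PMD advertises it per-queue:

#include <rte_ethdev.h>

static int
setup_scatter_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		  struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rxconf = dev_info.default_rxconf;
	if (dev_info.rx_queue_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
		rxconf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;

	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
			rte_eth_dev_socket_id(port_id), &rxconf, mb_pool);
}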
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 589d9fa5877d..2f6df2c2f6b8 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -50,11 +50,11 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->speed_capa =
 		(hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
-		ETH_LINK_SPEED_10G :
+		RTE_ETH_LINK_SPEED_10G :
 		((hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
-		ETH_LINK_SPEED_25G :
-		ETH_LINK_SPEED_AUTONEG);
+		RTE_ETH_LINK_SPEED_25G :
+		RTE_ETH_LINK_SPEED_AUTONEG);
 
 	dev_info->max_rx_queues  = 1;
 	dev_info->max_tx_queues  = 1;
@@ -67,31 +67,31 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	};
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 
 	dev_info->dev_capa =
@@ -2410,10 +2410,10 @@ ipn3ke_update_link(struct rte_rawdev *rawdev,
 				(uint64_t *)&link_speed);
 	switch (link_speed) {
 	case IFPGA_RAWDEV_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case IFPGA_RAWDEV_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
 		IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
@@ -2471,9 +2471,9 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2529,9 +2529,9 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2803,10 +2803,10 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
 
 	if (frame_size > IPN3KE_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
-			(uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
+			(uint64_t)(RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
 	else
 		dev_data->dev_conf.rxmode.offloads &=
-			(uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
+			(uint64_t)(~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
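
The MTU path above reduces to one frame-size comparison; a sketch of that
arithmetic, with a hypothetical l2_overhead argument standing in for the
driver's header-plus-FCS constant (not part of this patch):

	#include <stdint.h>

	/* Hypothetical helper: does this MTU require the jumbo offload? */
	static inline int
	mtu_needs_jumbo(uint16_t mtu, uint32_t l2_overhead, uint32_t max_std_len)
	{
		uint32_t frame_size = (uint32_t)mtu + l2_overhead;

		return frame_size > max_std_len;
	}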
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b5371568b54d..e425cea05aa8 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1865,7 +1865,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= IXGBE_DMATXCTL_GDV;
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (qinq) {
 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
@@ -1880,7 +1880,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    " by single VLAN");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (qinq) {
 			/* Only the high 16 bits are valid */
 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
@@ -1967,10 +1967,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -2091,7 +2091,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			ctrl |= IXGBE_VLNCTRL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -2108,7 +2108,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 				ctrl |= IXGBE_RXDCTL_VME;
 				on = TRUE;
 			} else {
@@ -2130,17 +2130,17 @@ ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct ixgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -2151,19 +2151,18 @@ ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		ixgbe_vlan_hw_strip_config(dev);
-	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2202,10 +2201,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -2229,18 +2228,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -2250,12 +2249,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if no mq mode is configured, use the default scheme */
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enabled mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -2264,12 +2263,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -2284,13 +2283,13 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdq+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2299,15 +2298,15 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2316,39 +2315,39 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
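
For reference, a DCB RX configuration that satisfies the nb_tcs checks above
(a minimal sketch with an assumed priority-to-TC mapping, not part of this
patch):

	#include <rte_ethdev.h>

	/* Hypothetical helper: fill a 4-TC DCB RX config. */
	static void
	fill_dcb_rx_conf(struct rte_eth_conf *conf)
	{
		int i;

		conf->rxmode.mq_mode = RTE_ETH_MQ_RX_DCB;
		conf->rx_adv_conf.dcb_rx_conf.nb_tcs = RTE_ETH_4_TCS;
		/* Map the 8 user priorities onto the 4 traffic classes. */
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
			conf->rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % RTE_ETH_4_TCS;
	}
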
@@ -2357,7 +2356,7 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB) {
 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
 				PMD_INIT_LOG(ERR,
@@ -2381,8 +2380,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = ixgbe_check_mq_mode(dev);
@@ -2627,15 +2626,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
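
Applications drive the same code path through rte_eth_dev_set_vlan_offload()
with the companion RTE_ETH_VLAN_*_OFFLOAD bits; a sketch of assumed usage
(hypothetical helper, not part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical helper: turn on VLAN stripping, keeping other bits. */
	static int
	enable_vlan_strip(uint16_t port_id)
	{
		int cur = rte_eth_dev_get_vlan_offload(port_id);

		if (cur < 0)
			return cur;
		return rte_eth_dev_set_vlan_offload(port_id,
				cur | RTE_ETH_VLAN_STRIP_OFFLOAD);
	}
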
@@ -2712,17 +2711,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |  RTE_ETH_LINK_SPEED_5G |
+			RTE_ETH_LINK_SPEED_10G;
 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-			allowed_speeds = ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+			allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 		break;
 	default:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 	}
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
@@ -2736,7 +2735,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
@@ -2754,17 +2753,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
 		}
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= IXGBE_LINK_SPEED_100_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= IXGBE_LINK_SPEED_10_FULL;
 	}
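
On the application side, the same RTE_ETH_LINK_SPEED_* bits select the speeds
fed into this loop; a minimal sketch, assuming a fixed 1G link is wanted
(not part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical helper: request a fixed 1 Gbps link, no autoneg. */
	static void
	force_1g(struct rte_eth_conf *conf)
	{
		conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
				    RTE_ETH_LINK_SPEED_1G;
	}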
 
@@ -3839,7 +3838,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB)
 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
 	}
@@ -3849,9 +3848,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
@@ -3890,21 +3889,21 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-		dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 
 	if (hw->mac.type == ixgbe_mac_X540 ||
 	    hw->mac.type == ixgbe_mac_X540_vf ||
 	    hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550_vf) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	}
 	if (hw->mac.type == ixgbe_mac_X550) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 	}
 
 	/* Driver-preferred Rx/Tx parameters */
@@ -3973,9 +3972,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -4218,11 +4217,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	u32 esdp_reg;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -4244,8 +4243,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
 	if (diag != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -4281,37 +4280,37 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case IXGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 
 	case IXGBE_LINK_SPEED_10_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 
 	case IXGBE_LINK_SPEED_100_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case IXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
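
A hedged sketch of reading this state back from an application, using the
renamed status, speed and duplex constants (hypothetical helper, not part of
this patch):

	#include <stdio.h>
	#include <rte_ethdev.h>

	/* Hypothetical helper: log the current link state of a port. */
	static void
	log_link(uint16_t port_id)
	{
		struct rte_eth_link link;

		if (rte_eth_link_get_nowait(port_id, &link) != 0)
			return;
		if (link.link_status == RTE_ETH_LINK_UP)
			printf("port %u: up, %u Mbps, %s\n", port_id,
			       link.link_speed,
			       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
		else
			printf("port %u: down\n", port_id);
	}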
 
@@ -4528,7 +4527,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -4747,13 +4746,13 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
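
For reference, a sketch of querying this from an application with the renamed
RTE_ETH_FC_* enumerators (hypothetical helper, not part of this patch):

	#include <stdio.h>
	#include <string.h>
	#include <rte_ethdev.h>

	/* Hypothetical helper: print the negotiated flow-control mode. */
	static void
	print_fc_mode(uint16_t port_id)
	{
		struct rte_eth_fc_conf fc;

		memset(&fc, 0, sizeof(fc));
		if (rte_eth_dev_flow_ctrl_get(port_id, &fc) != 0)
			return;
		printf("port %u: flow control %s\n", port_id,
		       fc.mode == RTE_ETH_FC_FULL ? "full" :
		       fc.mode == RTE_ETH_FC_RX_PAUSE ? "rx pause" :
		       fc.mode == RTE_ETH_FC_TX_PAUSE ? "tx pause" : "none");
	}
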
@@ -5051,8 +5050,8 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5099,8 +5098,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
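
The idx/shift arithmetic above is mirrored on the caller side when filling
reta_conf[]; a sketch assuming a table of at most 512 entries (hypothetical
helper, not part of this patch):

	#include <string.h>
	#include <rte_ethdev.h>

	/* Hypothetical helper: spread RETA entries round-robin over queues. */
	static int
	spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
	{
		struct rte_eth_rss_reta_entry64
			reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta_conf, 0, sizeof(reta_conf));
		for (i = 0; i < reta_size; i++) {
			uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

			reta_conf[idx].mask |= 1ULL << shift;
			reta_conf[idx].reta[shift] = i % nb_queues;
		}
		return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
	}
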
@@ -5199,11 +5198,11 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* switch to jumbo mode if needed */
 	if (frame_size > IXGBE_ETH_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
 		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -5271,22 +5270,22 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -5346,8 +5345,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	ixgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -5581,10 +5580,10 @@ ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports the HW strip feature; others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
@@ -5715,12 +5714,12 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 		}
@@ -5734,15 +5733,15 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= IXGBE_VMOLR_AUPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= IXGBE_VMOLR_ROMPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= IXGBE_VMOLR_ROPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= IXGBE_VMOLR_BAM;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= IXGBE_VMOLR_MPE;
 
 	return new_val;
@@ -5753,8 +5752,8 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
-	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
-	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+	((mirror_type) & ~(uint8_t)(RTE_ETH_MIRROR_VIRTUAL_POOL_UP | \
+	RTE_ETH_MIRROR_UPLINK_PORT | RTE_ETH_MIRROR_DOWNLINK_PORT | RTE_ETH_MIRROR_VLAN))
 
 static int
 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
@@ -5794,7 +5793,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VLAN) {
 		mirror_type |= IXGBE_MRCTL_VLME;
 		/* Check if vlan id is valid and find corresponding VLAN ID
 		 * index in VLVF
@@ -5827,7 +5826,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 
 			mr_info->mr_conf[rule_id].vlan.vlan_mask =
 						mirror_conf->vlan.vlan_mask;
-			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+			for (i = 0; i < RTE_ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
 				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
 					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
 						mirror_conf->vlan.vlan_id[i];
@@ -5836,7 +5835,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 			mv_lsb = 0;
 			mv_msb = 0;
 			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+			for (i = 0; i < RTE_ETH_VMDQ_MAX_VLAN_FILTERS; i++)
 				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
 		}
 	}
@@ -5845,7 +5844,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	 * if pool mirroring is enabled, write the related pool mask register;
 	 * if disabled, clear the PFMRVM register
 	 */
-	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VIRTUAL_POOL_UP) {
 		mirror_type |= IXGBE_MRCTL_VPME;
 		if (on) {
 			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
@@ -5859,9 +5858,9 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 			mr_info->mr_conf[rule_id].pool_mask = 0;
 		}
 	}
-	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_UPLINK_PORT)
 		mirror_type |= IXGBE_MRCTL_UPME;
-	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_DOWNLINK_PORT)
 		mirror_type |= IXGBE_MRCTL_DPME;
 
 	/* read  mirror control register and recalculate it */
@@ -5882,13 +5881,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
 	/* write pool mirror control register */
-	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VIRTUAL_POOL_UP) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
 				mp_msb);
 	}
 	/* write VLAN mirror control register */
-	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+	if (mirror_conf->rule_type & RTE_ETH_MIRROR_VLAN) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
 				mv_msb);
@@ -6266,8 +6265,8 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set to 0x4.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
-	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) &&
+	    rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
 	else
@@ -6942,15 +6941,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = IXGBE_INCVAL_1GB;
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -7361,16 +7360,16 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		return ETH_RSS_RETA_SIZE_512;
+		return RTE_ETH_RSS_RETA_SIZE_512;
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
-		return ETH_RSS_RETA_SIZE_64;
+		return RTE_ETH_RSS_RETA_SIZE_64;
 	case ixgbe_mac_X540_vf:
 	case ixgbe_mac_82599_vf:
 		return 0;
 	default:
-		return ETH_RSS_RETA_SIZE_128;
+		return RTE_ETH_RSS_RETA_SIZE_128;
 	}
 }
 
@@ -7380,10 +7379,10 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		if (reta_idx < ETH_RSS_RETA_SIZE_128)
+		if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
 			return IXGBE_RETA(reta_idx >> 2);
 		else
-			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+			return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
@@ -7439,7 +7438,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -7450,7 +7449,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -7474,9 +7473,9 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7489,7 +7488,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7742,7 +7741,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -7774,7 +7773,7 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -7871,12 +7870,12 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
 
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
@@ -7908,11 +7907,11 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a0ce18ca246b..3443154589e8 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -113,15 +113,15 @@
 #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE    0x0
 
 #define IXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf irq enable mask */
 #define IXGBE_VF_MAXMSIVECTOR           1
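
Since IXGBE_RSS_OFFLOAD_ALL is driver-internal, an application would normally
name the RTE_ETH_RSS_* bits directly; a sketch of assumed usage in a unit
that can see this header (hypothetical helper, not part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical helper: enable RSS over all hash types listed above. */
	static int
	enable_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_conf conf = { 0 };

		conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
		conf.rx_adv_conf.rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;

		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	}
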
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 27a49bbce5e7..7894047829a8 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -90,9 +90,9 @@ static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 				 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
 			uint32_t fdircmd, uint32_t fdirhash,
@@ -163,20 +163,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
 {
 	*fdirctrl = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
 		break;
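
The capacities noted in the comments above can be read back as a pure
function of the pballoc setting; an illustrative sketch (assumption, not
part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical helper: signature-filter capacity per pballoc mode. */
	static uint32_t
	fdir_sig_filter_cap(enum rte_eth_fdir_pballoc_type pballoc)
	{
		switch (pballoc) {
		case RTE_ETH_FDIR_PBALLOC_64K:
			return 8 * 1024 - 1;	/* 8k - 1 filters */
		case RTE_ETH_FDIR_PBALLOC_128K:
			return 16 * 1024 - 1;	/* 16k - 1 filters */
		case RTE_ETH_FDIR_PBALLOC_256K:
			return 32 * 1024 - 1;	/* 32k - 1 filters */
		default:
			return 0;
		}
	}
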
@@ -807,13 +807,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_128KB_HASH_MASK;
@@ -850,15 +850,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_128KB_HASH_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 511b612f7fe4..0557de6c1aa5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1259,7 +1259,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index e45c5501e6bf..944c9f23809e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -392,7 +392,7 @@ ixgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -400,7 +400,7 @@ ixgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -633,11 +633,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -657,7 +657,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
@@ -665,7 +665,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index fbf2b17d160f..d03238b728ba 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -107,15 +107,15 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
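
As a worked example, the pool/queue split chosen above is a pure function of
the VF count (sketch mirroring the switch, not part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical helper: queues per pool for a given number of VFs. */
	static uint16_t
	queues_per_pool(uint16_t vf_num)
	{
		if (vf_num >= RTE_ETH_32_POOLS)		/* 32+ VFs -> 64 pools */
			return 2;
		if (vf_num >= RTE_ETH_16_POOLS)		/* 16+ VFs -> 32 pools */
			return 4;
		return 8;				/* <16 VFs -> 16 pools */
	}
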
@@ -266,15 +266,15 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
 		gpie |= IXGBE_GPIE_VTMODE_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
 		gpie |= IXGBE_GPIE_VTMODE_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
 		gpie |= IXGBE_GPIE_VTMODE_16;
 		break;
@@ -604,11 +604,11 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 		if (max_frame > IXGBE_ETH_MAX_LEN) {
 			dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_JUMBO_FRAME;
+				RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		} else {
 			dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_JUMBO_FRAME;
+				~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -684,29 +684,29 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
 		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c814a28cb49a..4e712c2b5e61 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2591,26 +2591,26 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2778,7 +2778,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/*
@@ -3014,7 +3014,7 @@ ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (hw->mac.type != ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return offloads;
 }
@@ -3025,20 +3025,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t offloads;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_JUMBO_FRAME |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_SCATTER |
-		   DEV_RX_OFFLOAD_RSS_HASH;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (ixgbe_is_vf(dev) == 0)
-		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	/*
 	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -3048,20 +3048,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	     hw->mac.type == ixgbe_mac_X540 ||
 	     hw->mac.type == ixgbe_mac_X550) &&
 	    !RTE_ETH_DEV_SRIOV(dev).active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -3116,7 +3116,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -3520,23 +3520,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
@@ -3618,23 +3618,23 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -3710,12 +3710,12 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		ixgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * RXPBSIZE
@@ -3740,7 +3740,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -3749,7 +3749,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	}
 
 	/* MRQC: enable vmdq and dcb */
-	mrqc = (num_pools == ETH_16_POOLS) ?
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
 		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
@@ -3765,7 +3765,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3789,7 +3789,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 16 or 32 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*
 	 * MPSAR - allow pools to read specific mac addresses
@@ -3871,7 +3871,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	if (hw->mac.type != ixgbe_mac_82598EB)
 		/*PF VF Transmit Enable*/
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
-			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	ixgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3887,12 +3887,12 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3902,7 +3902,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3920,12 +3920,12 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3935,7 +3935,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3962,7 +3962,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3989,7 +3989,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -4158,7 +4158,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		if (hw->mac.type != ixgbe_mac_82598EB) {
 			config_dcb_rx = DCB_RX_CONFIG;
@@ -4171,8 +4171,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			ixgbe_vmdq_dcb_configure(dev);
 		}
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -4185,7 +4185,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -4196,7 +4196,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/*get DCB TX configuration parameters from rte_eth_conf*/
@@ -4212,15 +4212,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
-			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -4270,9 +4270,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 		}
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		}
 	}
 	if (config_dcb_tx) {
 		/* Only support an equally distributed
@@ -4286,7 +4285,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
 		}
@@ -4322,7 +4321,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
@@ -4336,7 +4335,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = ixgbe_dcb_pfc_enabled;
 		}
 		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -4357,12 +4356,12 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -4418,7 +4417,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 64 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
 
 	/*
@@ -4539,11 +4538,11 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
 		break;
 
@@ -4564,17 +4563,17 @@ ixgbe_config_vf_default(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQEN);
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT4TCEN);
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT8TCEN);
 		break;
@@ -4601,21 +4600,21 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			ixgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			ixgbe_rss_disable(dev);
@@ -4626,18 +4625,18 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4671,7 +4670,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			ixgbe_vmdq_tx_hw_configure(hw);
 		else {
 			mtqc = IXGBE_MTQC_64Q_1PB;
@@ -4684,13 +4683,13 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
 				IXGBE_MTQC_8TC_8TQ;
 			break;
@@ -4898,7 +4897,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		rxq->rx_using_sse = rx_using_sse;
 #ifdef RTE_LIB_SECURITY
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 #endif
 	}
 }
@@ -4926,10 +4925,10 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4937,8 +4936,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4952,7 +4951,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 	else
 		rfctl |= IXGBE_RFCTL_RSC_DIS;
@@ -4961,7 +4960,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -5082,7 +5081,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -5090,7 +5089,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -5119,7 +5118,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5128,7 +5127,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -5171,11 +5170,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -5190,7 +5189,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -5200,7 +5199,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5406,9 +5405,9 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 #ifdef RTE_LIB_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SECURITY) ||
+			RTE_ETH_RX_OFFLOAD_SECURITY) ||
 		(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY)) {
+			RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = ixgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -5696,7 +5695,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5745,7 +5744,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -5754,8 +5753,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
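
Note: for applications, only the macro spelling changes here; the
configuration flow is untouched. A minimal sketch with the new Rx
offload names (error handling and includes elided; port_id is assumed
to be a valid, probed port):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Request CRC keeping only where the PMD advertises it. */
	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	rte_eth_dev_configure(port_id, 1, 1, &conf);
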
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 476ef62cfda2..220efffe4d08 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -133,7 +133,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_udp_csum_zero_err;
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -226,7 +226,7 @@ struct ixgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index adba855ca30f..714707941537 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,7 +278,7 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index a8407e742e6d..c2ab3131f22e 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -119,14 +119,14 @@ ixgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -375,10 +375,10 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -392,7 +392,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
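
Note: the pool/TC trade-off encoded above follows from the 82599 queue
budget: 128 queues split as either 16 pools x 8 TCs or 32 pools x 4 TCs.
A hypothetical helper making that mapping explicit with the new names:

	static uint8_t
	pools_to_tcs(uint16_t nb_queue_pools)
	{
		/* 16 pools -> 8 TCs, 32 pools -> 4 TCs (128 queues total) */
		return nb_queue_pools == RTE_ETH_16_POOLS ?
		       RTE_ETH_8_TCS : RTE_ETH_4_TCS;
	}
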
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index d5b636a19408..536e33010703 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -58,20 +58,20 @@ ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	/**< Maximum number of MAC addresses. */
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |	DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 	/**< Device RX offload capabilities. */
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	/**< Device TX offload capabilities. */
 
 	dev_info->speed_capa =
 		representor->pf_ethdev->data->dev_link.link_speed;
-	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	/**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 
 	dev_info->switch_info.name =
 		representor->pf_ethdev->device->name;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index cf089cd9aee5..9729f8575f53 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -303,10 +303,10 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
 	 */
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_16_POOLS;
+				  RTE_ETH_16_POOLS;
 	else
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_64_POOLS;
+				  RTE_ETH_64_POOLS;
 
 	for (q = 0; q < queues_per_pool; q++)
 		(*dev->dev_ops->vlan_strip_queue_set)(dev,
@@ -736,14 +736,14 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
 	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 	eth_conf = &dev->data->dev_conf;
 
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 90fc8160b1f8..eef6f6661c74 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -285,8 +285,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
 * @param rx_mask
 *    The RX mode mask, which is one or more of accepting Untagged Packets,
 *    packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-*    ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-*    ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+*    RTE_ETH_VMDQ_ACCEPT_UNTAG, RTE_ETH_VMDQ_ACCEPT_HASH_UC,
+*    RTE_ETH_VMDQ_ACCEPT_BROADCAST and RTE_ETH_VMDQ_ACCEPT_MULTICAST will be used
 *    in rx_mode.
 * @param on
 *    1 - Enable a VF RX mode.
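
Note: a usage sketch for the comment above, with the renamed accept
flags (the port and VF numbers are illustrative only):

	uint16_t rx_mask = RTE_ETH_VMDQ_ACCEPT_UNTAG |
			   RTE_ETH_VMDQ_ACCEPT_BROADCAST;

	/* Enable untagged + broadcast acceptance for VF 0 on port 0. */
	rte_pmd_ixgbe_set_vf_rxmode(0, 0, rx_mask, 1);
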
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index 871d11c4133d..29060ca76f93 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,10 @@ struct pmd_internals {
 };
 
 static const struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 static int is_kni_initialized;
 
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index b72060a4499b..e91f4c13c63b 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -384,15 +384,15 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		break;
 	/* CN23xx 25G cards */
 	case PCI_SUBSYS_DEV_ID_CN2350_225:
 	case PCI_SUBSYS_DEV_ID_CN2360_225:
-		devinfo->speed_capa = ETH_LINK_SPEED_25G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		break;
 	default:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		lio_dev_err(lio_dev,
 			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
 		return -EINVAL;
@@ -406,27 +406,27 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	devinfo->max_mac_addrs = 1;
 
-	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_RX_OFFLOAD_UDP_CKSUM		|
-				    DEV_RX_OFFLOAD_TCP_CKSUM		|
-				    DEV_RX_OFFLOAD_VLAN_STRIP		|
-				    DEV_RX_OFFLOAD_RSS_HASH);
-	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_TX_OFFLOAD_UDP_CKSUM		|
-				    DEV_TX_OFFLOAD_TCP_CKSUM		|
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+	devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH);
+	devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
 
 	devinfo->rx_desc_lim = lio_rx_desc_lim;
 	devinfo->tx_desc_lim = lio_tx_desc_lim;
 
 	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
 	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
-	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
-					   ETH_RSS_NONFRAG_IPV4_TCP	|
-					   ETH_RSS_IPV6			|
-					   ETH_RSS_NONFRAG_IPV6_TCP	|
-					   ETH_RSS_IPV6_EX		|
-					   ETH_RSS_IPV6_TCP_EX);
+	devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4			|
+					   RTE_ETH_RSS_NONFRAG_IPV4_TCP	|
+					   RTE_ETH_RSS_IPV6			|
+					   RTE_ETH_RSS_NONFRAG_IPV6_TCP	|
+					   RTE_ETH_RSS_IPV6_EX		|
+					   RTE_ETH_RSS_IPV6_TCP_EX);
 	return 0;
 }
 
@@ -483,10 +483,10 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 
 	if (frame_len > LIO_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+			RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		eth_dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
 	eth_dev->data->mtu = mtu;
@@ -540,10 +540,10 @@ lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
 	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
 
-	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				rss_state->itable[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -583,12 +583,12 @@ lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
-		       RTE_RETA_GROUP_SIZE);
+		       &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE],
+		       RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -616,17 +616,17 @@ lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
 
 	if (rss_state->ip)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (rss_state->tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (rss_state->ipv6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (rss_state->ipv6_tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (rss_state->ipv6_ex)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (rss_state->ipv6_tcp_ex_hash)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -694,42 +694,42 @@ lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (rss_state->hash_disable)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 			hashinfo |= LIO_RSS_HASH_IPV4;
 			rss_state->ip = 1;
 		} else {
 			rss_state->ip = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
 			rss_state->tcp_hash = 1;
 		} else {
 			rss_state->tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {
 			hashinfo |= LIO_RSS_HASH_IPV6;
 			rss_state->ipv6 = 1;
 		} else {
 			rss_state->ipv6 = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
 			rss_state->ipv6_tcp_hash = 1;
 		} else {
 			rss_state->ipv6_tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {
 			hashinfo |= LIO_RSS_HASH_IPV6_EX;
 			rss_state->ipv6_ex = 1;
 		} else {
 			rss_state->ipv6_ex = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
 			rss_state->ipv6_tcp_ex_hash = 1;
 		} else {
@@ -778,7 +778,7 @@ lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -835,7 +835,7 @@ lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -933,10 +933,10 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	/* Initialize */
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	/* Return what we found */
 	if (lio_dev->linfo.link.s.link_up == 0) {
@@ -944,18 +944,18 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		return rte_eth_linkstatus_set(eth_dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP; /* Interface is up */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP; /* Interface is up */
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	switch (lio_dev->linfo.link.s.speed) {
 	case LIO_LINK_SPEED_10000:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case LIO_LINK_SPEED_25000:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	}
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -1107,8 +1107,8 @@ lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
 
 		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
 				  i % eth_dev->data->nb_rx_queues : 0);
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[conf_idx].reta[reta_idx] = q_idx;
 		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
 	}
@@ -1124,10 +1124,10 @@ lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rss_conf rss_conf;
 
 	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		lio_dev_rss_configure(eth_dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 	/* if mq_mode is none, disable rss mode. */
 	default:
 		memset(&rss_conf, 0, sizeof(rss_conf));
@@ -1509,7 +1509,7 @@ lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 1;
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -1530,11 +1530,11 @@ lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 0;
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
 		lio_dev->linfo.link.s.link_up = 1;
-		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		lio_dev_err(lio_dev, "Unable to set Link Down\n");
 		return -1;
 	}
@@ -1746,9 +1746,9 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Inform firmware about change in number of queues to use.
 	 * Disable IO queues and reset registers for re-configuration.
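
Note: the RETA indexing pattern above is the same one applications use;
a sketch filling a redirection table round-robin (RETA_SIZE and
nb_rx_queues are assumptions; query dev_info.reta_size in real code):

	#define RETA_SIZE 128
	struct rte_eth_rss_reta_entry64
		reta_conf[RETA_SIZE / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RETA_SIZE; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
	rte_eth_dev_rss_reta_update(port_id, reta_conf, RETA_SIZE);
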
diff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c
index f58ff4c0cb77..a117a05228fc 100644
--- a/drivers/net/memif/memif_socket.c
+++ b/drivers/net/memif/memif_socket.c
@@ -525,7 +525,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
 
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index de6becd45e3e..ea66f5bfd452 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -55,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION		"memif_mp_send_region"
@@ -1216,7 +1216,7 @@ memif_connect(struct rte_eth_dev *dev)
 
 		pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 		pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	}
 	MIF_LOG(INFO, "Connected.");
 	return 0;
@@ -1367,10 +1367,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		proc_private = dev->process_private;
-		if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+		if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
 				proc_private->regions_num == 0) {
 			memif_mp_request_regions(dev);
-		} else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+		} else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
 				proc_private->regions_num > 0) {
 			memif_free_regions(dev);
 		}
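
Note: the same substitution applies on the application side when
reading link state; a sketch with the renamed constants (port_id
assumed valid):

	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status == RTE_ETH_LINK_UP &&
	    link.link_speed == RTE_ETH_SPEED_NUM_10G)
		printf("port %u: 10G link up\n", port_id);
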
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 783ff94dce8d..d606ec8ca76d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -657,11 +657,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->if_index = priv->if_index;
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
 	info->speed_capa =
-			ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_20G |
-			ETH_LINK_SPEED_40G |
-			ETH_LINK_SPEED_56G;
+			RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_20G |
+			RTE_ETH_LINK_SPEED_40G |
+			RTE_ETH_LINK_SPEED_56G;
 	info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
 
 	return 0;
@@ -821,13 +821,13 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		dev_link.link_speed = link_speed;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	dev->data->dev_link = dev_link;
 	return 0;
 }
@@ -863,13 +863,13 @@ mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	ret = 0;
 out:
 	MLX4_ASSERT(ret >= 0);
@@ -899,13 +899,13 @@ mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
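
Note: the bit tests above rely on the flow-control mode values keeping
their numeric relationship, i.e. RTE_ETH_FC_FULL ==
(RTE_ETH_FC_RX_PAUSE | RTE_ETH_FC_TX_PAUSE), which the rename
preserves. An application-side sketch:

	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	fc_conf.mode = RTE_ETH_FC_FULL; /* pause frames both directions */
	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
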
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 71ea91b3fb82..2e1b6c87e983 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -109,21 +109,21 @@ mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
 	};
 	static const uint64_t dpdk[] = {
 		[INNER] = 0,
-		[IPV4] = ETH_RSS_IPV4,
-		[IPV4_1] = ETH_RSS_FRAG_IPV4,
-		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IPV6] = ETH_RSS_IPV6,
-		[IPV6_1] = ETH_RSS_FRAG_IPV6,
-		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IPV6_3] = ETH_RSS_IPV6_EX,
+		[IPV4] = RTE_ETH_RSS_IPV4,
+		[IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
+		[IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IPV6] = RTE_ETH_RSS_IPV6,
+		[IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
+		[IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IPV6_3] = RTE_ETH_RSS_IPV6_EX,
 		[TCP] = 0,
 		[UDP] = 0,
-		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
-		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
-		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
-		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
-		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
-		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+		[IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+		[IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+		[IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+		[IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
+		[IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+		[IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
 	};
 	static const uint64_t verbs[RTE_DIM(dpdk)] = {
 		[INNER] = IBV_RX_HASH_INNER,
@@ -1283,7 +1283,7 @@ mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1358,7 +1358,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 	struct rte_ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
 		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
+		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 		!ETH_DEV(priv)->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index d56009c41845..2aab0f60a7b5 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -118,7 +118,7 @@ mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
 static void
 mlx4_link_status_alarm(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	MLX4_ASSERT(priv->intr_alarm == 1);
@@ -183,7 +183,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
 	};
 	uint32_t caught[RTE_DIM(type)] = { 0 };
 	struct ibv_async_event event;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	unsigned int i;
 
@@ -280,7 +280,7 @@ mlx4_intr_uninstall(struct mlx4_priv *priv)
 int
 mlx4_intr_install(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	int rc;
 
@@ -386,7 +386,7 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx4_rxq_intr_enable(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
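
Note: the rte_intr_conf -> rte_eth_intr_conf rename only affects code
that names the type explicitly; designated initialization through
struct rte_eth_conf is unchanged (a sketch):

	struct rte_eth_conf conf = {
		.intr_conf = {
			.lsc = 1, /* link status change interrupt */
			.rxq = 1, /* per-Rx-queue interrupts */
		},
	};
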
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 978cbb8201ea..9977c761880a 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -682,13 +682,13 @@ mlx4_rxq_detach(struct rxq *rxq)
 uint64_t
 mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
-			    DEV_RX_OFFLOAD_KEEP_CRC |
-			    DEV_RX_OFFLOAD_JUMBO_FRAME |
-			    DEV_RX_OFFLOAD_RSS_HASH;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+			    RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+			    RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (priv->hw_csum)
-		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return offloads;
 }
 
@@ -704,7 +704,7 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 uint64_t
 mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	(void)priv;
 	return offloads;
@@ -785,7 +785,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
 	crc_present = 0;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (priv->hw_fcs_strip) {
 			crc_present = 1;
 		} else {
@@ -816,9 +816,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -831,7 +831,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 2df26842fbe4..19feec5e5202 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -273,20 +273,20 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 uint64_t
 mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+	uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (priv->hw_csum) {
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	}
 	if (priv->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (priv->hw_csum_l2tun) {
-		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (priv->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	}
 	return offloads;
 }
@@ -394,12 +394,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					   DEV_TX_OFFLOAD_UDP_CKSUM |
-					   DEV_TX_OFFLOAD_TCP_CKSUM)),
+			(offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
 			      (offloads &
-			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+			       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
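
Note: the Tx side mirrors the Rx side; a sketch gating TSO on the
advertised capability (conf as in the earlier Rx sketch):

	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
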
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index f34133e2c641..79e27fe2d668 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -439,24 +439,24 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	else
 		dev_link.link_speed = link_speed;
 	priv->link_speed_capa = 0;
 	if (edata.supported & (SUPPORTED_1000baseT_Full |
 			       SUPPORTED_1000baseKX_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (edata.supported & SUPPORTED_10000baseKR_Full)
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
 			       SUPPORTED_40000baseCR4_Full |
 			       SUPPORTED_40000baseSR4_Full |
 			       SUPPORTED_40000baseLR4_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -545,45 +545,45 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		return ret;
 	}
 	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
-				ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+				RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
 	sc = ecmd->link_mode_masks[0] |
 		((uint64_t)ecmd->link_mode_masks[1] << 32);
 	priv->link_speed_capa = 0;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	sc = ecmd->link_mode_masks[2] |
 		((uint64_t)ecmd->link_mode_masks[3] << 32);
@@ -591,11 +591,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		  MLX5_BITSHIFT
 		       (ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -677,13 +677,13 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -709,14 +709,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 5f8766aa481e..c40cda8fcaf9 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1343,8 +1343,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
@@ -1627,7 +1627,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/*
 	 * If HW has bug working with tunnel packet decapsulation and
 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
 	 */
 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
 		config->hw_fcs_strip = 0;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index f84e061fe719..ff1c8e17460a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1463,10 +1463,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
 			 struct rte_eth_udp_tunnel *udp_tunnel)
 {
 	MLX5_ASSERT(udp_tunnel != NULL);
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
 	    udp_tunnel->udp_port == 4789)
 		return 0;
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
 	    udp_tunnel->udp_port == 4790)
 		return 0;
 	return -ENOTSUP;
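
Note: callers pass the renamed tunnel type the same way; a sketch using
the standard VXLAN UDP port:

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
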
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e02714e23196..9588dff05180 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1226,7 +1226,7 @@ TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
 struct mlx5_flow_rss_desc {
 	uint32_t level;
 	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index fe86bb40d351..12ddf4c7ff28 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -90,11 +90,11 @@
 #define MLX5_VPMD_DESCS_PER_LOOP      4
 
 /* Mask of RSS on source only or destination only. */
-#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
-			       ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+			       RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
 			    MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
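
Note: RTE_ETH_RSS_IP/UDP/TCP stay convenience ORs of the per-protocol
bits, so masks such as MLX5_RSS_HF_MASK above compose identically after
the rename. The application-side equivalent (a sketch):

	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL, /* PMD default key */
				.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
					  RTE_ETH_RSS_TCP,
			},
		},
	};
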
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 82e2284d9866..f2b78c3cc69e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -91,7 +91,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	if ((dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
+			RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
 			rte_mbuf_dyn_tx_timestamp_register(NULL, NULL) != 0) {
 		DRV_LOG(ERR, "port %u cannot register Tx timestamp field/flag",
 			dev->data->port_id);
@@ -225,8 +225,8 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->default_txportconf.ring_size = 256;
 	info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
 	info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
-	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
-		(priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
+	if ((priv->link_speed_capa & RTE_ETH_LINK_SPEED_200G) |
+		(priv->link_speed_capa & RTE_ETH_LINK_SPEED_100G)) {
 		info->default_rxportconf.nb_queues = 16;
 		info->default_txportconf.nb_queues = 16;
 		if (dev->data->nb_rx_queues > 2 ||
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4762fa0f5f88..7048fff3883e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
 	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
 	 */
 	uint64_t node_flags;
 	/**<
@@ -272,7 +272,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -522,8 +522,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -531,11 +531,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -546,8 +546,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_GRE,
 			 MLX5_EXPANSION_NVGRE),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -555,11 +555,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_VXLAN] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -612,32 +612,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
 						  MLX5_EXPANSION_IPV4_TCP),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_IPV4_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
 						  MLX5_EXPANSION_IPV6_TCP,
 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_IPV6_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
@@ -1048,7 +1048,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1601,14 +1601,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
-	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-	    !(rss->types & ETH_RSS_IP))
+	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & RTE_ETH_RSS_IP))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L3 partial RSS requested but L3 RSS"
 					  " type not specified");
-	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
@@ -6364,8 +6364,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 * mlx5_flow_hashfields_adjust() in advance.
 		 */
 		rss_desc->level = rss->level;
-		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	}
 	flow->dev_handles = 0;
 	if (rss && rss->types) {
@@ -6989,7 +6989,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	if (!priv->reta_idx_n || !priv->rxqs_n) {
 		return 0;
 	}
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
@@ -8657,7 +8657,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 				(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 			ctx->action_rss.types = 0;
 		for (i = 0; i != priv->reta_idx_n; ++i)
 			ctx->queue[i] = (*priv->reta_idx)[i];
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 76ad53f2a1e8..d5d3a89374fe 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -328,18 +328,18 @@ enum mlx5_feature_name {
 
 /* Valid layer type for IPV4 RSS. */
 #define MLX5_IPV4_LAYER_TYPES \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-	 ETH_RSS_NONFRAG_IPV4_OTHER)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
 
 /* IBV hash source bits  for IPV4. */
 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 
 /* Valid layer type for IPV6 RSS. */
 #define MLX5_IPV6_LAYER_TYPES \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
-	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX  | ETH_RSS_IPV6_TCP_EX | \
-	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
+	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 /* IBV hash source bits  for IPV6. */
 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
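
The SRC_ONLY/DST_ONLY bits narrow the hash input to one address half; the
selection pattern that flow_dv_hashfields_set() below repeats per layer
collapses to a sketch like this (helper name illustrative):

#include <stdint.h>
#include <infiniband/verbs.h>
#include <rte_ethdev.h>

/* Pick IPv4 verbs hash fields from the renamed RTE_ETH_RSS_* bits. */
static uint64_t
pick_ipv4_hash_fields(uint64_t rss_types)
{
	if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
		return IBV_RX_HASH_SRC_IPV4;
	if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
		return IBV_RX_HASH_DST_IPV4;
	return MLX5_IPV4_IBV_RX_HASH;	/* hash on both source and destination */
}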
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 3f6f5dcfbadb..02a337dc2c93 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10934,9 +10934,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
 			else
 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10944,9 +10944,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
 			else
 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10960,11 +10960,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		return;
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-		if (rss_types & ETH_RSS_UDP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_UDP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_UDP;
 			else
@@ -10972,11 +10972,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		}
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-		if (rss_types & ETH_RSS_TCP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_TCP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_TCP;
 			else
@@ -14495,9 +14495,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4:
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV4;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV4;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV4;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14506,9 +14506,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV6:
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV6;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV6;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV6;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14517,11 +14517,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_UDP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_UDP:
-		if (rss_types & ETH_RSS_UDP) {
+		if (rss_types & RTE_ETH_RSS_UDP) {
 			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
 			else
 				*hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14530,11 +14530,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_TCP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_TCP:
-		if (rss_types & ETH_RSS_TCP) {
+		if (rss_types & RTE_ETH_RSS_TCP) {
 			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
 			else
 				*hash_field |= MLX5_TCP_IBV_RX_HASH;
@@ -14682,8 +14682,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 	origin = &shared_rss->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+	/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+	origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index b93fd4d2c962..ef286a13729c 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1834,7 +1834,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_TCP,
+					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
 					 (IBV_RX_HASH_SRC_PORT_TCP |
 					  IBV_RX_HASH_DST_PORT_TCP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1847,7 +1847,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_UDP,
+					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
 					 (IBV_RX_HASH_SRC_PORT_UDP |
 					  IBV_RX_HASH_DST_PORT_UDP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index c32129cdc2b8..a4f690039e24 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -68,7 +68,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 		if (!(*priv->rxqs)[i])
 			continue;
 		(*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
-			!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+			!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
 		++idx;
 	}
 	return 0;
@@ -170,8 +170,8 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	/* Fill each entry of the table even if its bit is not set. */
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 			(*priv->reta_idx)[i];
 	}
 	return 0;
@@ -209,8 +209,8 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (((reta_conf[idx].mask >> i) & 0x1) == 0)
 			continue;
 		MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
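
The idx/pos split above is the standard RETA addressing; the application side
uses the same arithmetic to build the update request. A sketch, assuming
reta_size <= 512 (helper name illustrative):

#include <stdint.h>
#include <rte_ethdev.h>

/* Spread RETA entries round-robin over nb_queues and update the port. */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 conf[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE] = { 0 };
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

		conf[idx].mask |= UINT64_C(1) << pos;	/* update this entry */
		conf[idx].reta[pos] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}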
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index abd8ce798986..0d6c58f47d89 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -333,23 +333,23 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
-	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_TIMESTAMP |
-			     DEV_RX_OFFLOAD_JUMBO_FRAME |
-			     DEV_RX_OFFLOAD_RSS_HASH);
+	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
 	if (!config->mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
-		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (config->hw_csum)
-		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-			     DEV_RX_OFFLOAD_UDP_CKSUM |
-			     DEV_RX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -363,7 +363,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 uint64_t
 mlx5_get_rx_port_offloads(void)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	return offloads;
 }
@@ -695,7 +695,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				    dev->data->dev_conf.rxmode.offloads;
 
 		/* The offloads should be checked on rte_eth_dev layer. */
-		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
 			DRV_LOG(ERR, "port %u queue index %u split "
 				     "offload not configured",
@@ -1329,7 +1329,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
-	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
 	unsigned int max_rx_pkt_len = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1431,7 +1431,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
-	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
@@ -1475,7 +1475,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			config->mprq.stride_size_n : mprq_stride_size;
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_scatter_en =
-				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
+				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pkt_len,
@@ -1490,7 +1490,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1551,9 +1551,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* Configure Rx timestamp. */
-	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
 	tmpl->rxq.timestamp_rx_flag = 0;
 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
 			&tmpl->rxq.timestamp_offset,
@@ -1562,11 +1562,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
@@ -1596,7 +1596,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.crc_present << 2);
 	/* Save port ID. */
 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
-		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = rx_seg[0].mp;
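
mlx5_get_rx_queue_offloads() builds the advertised capability set; the
matching application-side step is to intersect what it wants with what the
port reports, e.g. wanted = RTE_ETH_RX_OFFLOAD_CHECKSUM |
RTE_ETH_RX_OFFLOAD_SCATTER. A sketch (helper name illustrative):

#include <stdint.h>
#include <rte_ethdev.h>

/* Keep only the requested Rx offloads that the PMD actually advertises. */
static uint64_t
pick_rx_offloads(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return 0;
	return wanted & info.rx_offload_capa;
}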
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 93b4f517bb3e..65d91bdf67e2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -16,10 +16,10 @@
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
-	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
-	 DEV_TX_OFFLOAD_UDP_CKSUM | \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 
 /*
  * Compile time sanity check for vectorized functions.
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index df671379e46d..12aeba60348a 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -523,36 +523,36 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	unsigned int diff = 0, olx = 0, i, m;
 
 	MLX5_ASSERT(priv);
-	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
 		olx |= MLX5_TXOFF_CONFIG_MULTI;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			   DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
 		/* We should support TCP Send Offload. */
 		olx |= MLX5_TXOFF_CONFIG_TSO;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support Software Parser for Tunnels. */
 		olx |= MLX5_TXOFF_CONFIG_SWP;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support IP/TCP/UDP Checksums. */
 		olx |= MLX5_TXOFF_CONFIG_CSUM;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
 		/* We should support VLAN insertion. */
 		olx |= MLX5_TXOFF_CONFIG_VLAN;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
 	    rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
 	    rte_mbuf_dynfield_lookup
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index eb4d34ca559e..06cdeba662bc 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -98,35 +98,35 @@ uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_VLAN_INSERT);
+	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	struct mlx5_dev_config *config = &priv->config;
 
 	if (config->hw_csum)
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (config->tx_pp)
-		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 	if (config->swp) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	}
 	if (!config->mprq.enabled)
-		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -801,17 +801,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int inlen_mode; /* Minimal required Inline data. */
 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
-	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					    DEV_TX_OFFLOAD_IP_TNL_TSO |
-					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	bool vlan_inline;
 	unsigned int temp;
 
 	txq_ctrl->txq.fast_free =
-		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
-		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
 		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
@@ -870,7 +870,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	 * tx_burst routine.
 	 */
 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
-	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
 		      !config->hw_vlan_insert;
 	/*
 	 * If there are few Tx queues it is prioritized
@@ -979,9 +979,9 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		txq_ctrl->txq.tso_en = 1;
 	}
 	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
-	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
-				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
-				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+	txq_ctrl->txq.swp_en = ((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
 				txq_ctrl->txq.offloads) && config->swp;
 }
 
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 60f97f2d2d1f..07792fc5d94f 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -142,9 +142,9 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-				       DEV_RX_OFFLOAD_VLAN_STRIP);
+				       RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (!priv->config.hw_vlan_strip) {
 			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 7e1df1c75147..578816fe0513 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -464,8 +464,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
index a3ee15020466..37803fe34538 100644
--- a/drivers/net/mvneta/mvneta_ethdev.c
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -114,7 +114,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 	struct mvneta_priv *priv = dev->data->dev_private;
 	struct neta_ppio_params *ppio_params;
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
 		MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		if (dev->data->nb_rx_queues > 1)
@@ -126,11 +126,11 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 MRVL_NETA_ETH_HDRS_LEN;
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ppio_params = &priv->ppio_params;
@@ -155,10 +155,10 @@ static int
 mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 		   struct rte_eth_dev_info *info)
 {
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G;
 
 	info->max_rx_queues = MRVL_NETA_RXQ_MAX;
 	info->max_tx_queues = MRVL_NETA_TXQ_MAX;
@@ -510,28 +510,28 @@ mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 
 	neta_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
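
This driver and mvpp2 below repeat the same ethtool-to-ethdev speed
translation; factored out it is just a switch (sketch, helper name
illustrative):

#include <stdint.h>
#include <linux/ethtool.h>
#include <rte_ethdev.h>

/* Map kernel SPEED_* values to the renamed RTE_ETH_SPEED_NUM_* ones. */
static uint32_t
ethtool_to_rte_speed(uint32_t speed)
{
	switch (speed) {
	case SPEED_10:    return RTE_ETH_SPEED_NUM_10M;
	case SPEED_100:   return RTE_ETH_SPEED_NUM_100M;
	case SPEED_1000:  return RTE_ETH_SPEED_NUM_1G;
	case SPEED_2500:  return RTE_ETH_SPEED_NUM_2_5G;
	case SPEED_10000: return RTE_ETH_SPEED_NUM_10G;
	default:          return RTE_ETH_SPEED_NUM_NONE;
	}
}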
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
index ef8067790f82..ccd47e8f4927 100644
--- a/drivers/net/mvneta/mvneta_ethdev.h
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -54,15 +54,15 @@
 #define MRVL_NETA_MRU_TO_MTU(mru)	((mru) - MRVL_NETA_HDRS_LEN)
 
 /** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \
-			    DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+			    RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Tx offloads capabilities */
-#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				    DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MVNETA_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS)
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 				PKT_TX_TCP_CKSUM | \
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
index dfa7ecc09039..d28125ce9635 100644
--- a/drivers/net/mvneta/mvneta_rxtx.c
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -735,7 +735,7 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	rxq->priv = priv;
 	rxq->mp = mp;
 	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
-			     DEV_RX_OFFLOAD_IPV4_CKSUM;
+			     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	rxq->size = desc;
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 078aefbb8da4..539e196b807e 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -58,16 +58,16 @@
 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
 
 /** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
-			  DEV_RX_OFFLOAD_JUMBO_FRAME | \
-			  DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			  RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+			  RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Port Tx offloads capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				  DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				  DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
-			  DEV_TX_OFFLOAD_MULTI_SEGS)
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 			      PKT_TX_TCP_CKSUM | \
@@ -443,14 +443,14 @@ mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
 
 	if (rss_conf->rss_hf == 0) {
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
-	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_2_TUPLE;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 1;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 0;
@@ -484,8 +484,8 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
-	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		return -EINVAL;
@@ -496,7 +496,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 MRVL_PP2_ETH_HDRS_LEN;
 		if (dev->data->mtu > priv->max_mtu) {
@@ -508,7 +508,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
@@ -530,7 +530,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 
 	if (dev->data->nb_rx_queues == 1 &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
 		priv->configured = 1;
@@ -632,7 +632,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		return 0;
 	}
 
@@ -653,7 +653,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -673,14 +673,14 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 	ret = pp2_ppio_disable(priv->ppio);
 	if (ret)
 		return ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -902,7 +902,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->all_multicast == 1)
 		mrvl_allmulticast_enable(dev);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = mrvl_populate_vlan_table(dev, 1);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to populate VLAN table");
@@ -938,11 +938,11 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 		priv->flow_ctrl = 0;
 	}
 
-	if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 		ret = mrvl_dev_set_link_up(dev);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to set link up");
-			dev->data->dev_link.link_status = ETH_LINK_DOWN;
+			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 			goto out;
 		}
 	}
@@ -1211,30 +1211,30 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case SPEED_10000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 	pp2_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -1718,11 +1718,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
 	struct mrvl_priv *priv = dev->data->dev_private;
 
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G |
-			   ETH_LINK_SPEED_10G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G |
+			   RTE_ETH_LINK_SPEED_10G;
 
 	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
 	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
@@ -1742,9 +1742,9 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 	info->tx_offload_capa = MRVL_TX_OFFLOADS;
 	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
 
-	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-				       ETH_RSS_NONFRAG_IPV4_TCP |
-				       ETH_RSS_NONFRAG_IPV4_UDP;
+	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	/* By default packets are dropped if no descriptors are available */
 	info->default_rxconf.rx_drop_en = 1;
@@ -1873,13 +1873,13 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
 		return -ENOTSUP;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = mrvl_populate_vlan_table(dev, 1);
 		else
 			ret = mrvl_populate_vlan_table(dev, 0);
@@ -1888,7 +1888,7 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			return ret;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
 		MRVL_LOG(ERR, "Extend VLAN not supported\n");
 		return -ENOTSUP;
 	}
@@ -2033,7 +2033,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -2189,7 +2189,7 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return ret;
 	}
 
-	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
 
 	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
 	if (ret) {
@@ -2198,10 +2198,10 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	if (en) {
-		if (fc_conf->mode == RTE_FC_NONE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+		if (fc_conf->mode == RTE_ETH_FC_NONE)
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 	}
 
 	return 0;
@@ -2247,19 +2247,19 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		rx_en = 1;
 		tx_en = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		rx_en = 0;
 		tx_en = 1;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		rx_en = 1;
 		tx_en = 0;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		rx_en = 0;
 		tx_en = 0;
 		break;
@@ -2336,11 +2336,11 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hash_type == PP2_PPIO_HASH_T_NONE)
 		rss_conf->rss_hf = 0;
 	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
-		rss_conf->rss_hf = ETH_RSS_IPV4;
+		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	return 0;
 }
@@ -3159,7 +3159,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 	eth_dev->dev_ops = &mrvl_ops;
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_eth_dev_probing_finish(eth_dev);
 	return 0;
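
mrvl_flow_ctrl_get()/_set() above translate between per-direction pause flags
and the renamed mode enum; the get-side mapping collapses to a sketch like
this (helper name illustrative):

#include <stdbool.h>
#include <rte_ethdev.h>

/* Combine per-direction pause state into the renamed RTE_ETH_FC_* mode. */
static enum rte_eth_fc_mode
fc_mode_from_pause(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return RTE_ETH_FC_FULL;
	if (tx_pause)
		return RTE_ETH_FC_TX_PAUSE;
	if (rx_pause)
		return RTE_ETH_FC_RX_PAUSE;
	return RTE_ETH_FC_NONE;
}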
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 9e2a40597349..9c4ae80e7e16 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -40,16 +40,16 @@
 #include "hn_nvs.h"
 #include "ndis.h"
 
-#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
-			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-			    DEV_TX_OFFLOAD_TCP_TSO    | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS | \
-			    DEV_TX_OFFLOAD_VLAN_INSERT)
+#define HN_TX_OFFLOAD_CAPS (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			    RTE_ETH_TX_OFFLOAD_TCP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_TCP_TSO    | \
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+			    RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 
-#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
-			    DEV_RX_OFFLOAD_VLAN_STRIP | \
-			    DEV_RX_OFFLOAD_RSS_HASH)
+#define HN_RX_OFFLOAD_CAPS (RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+			    RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NETVSC_ARG_LATENCY "latency"
 #define NETVSC_ARG_RXBREAK "rx_copybreak"
@@ -238,21 +238,21 @@ hn_dev_link_update(struct rte_eth_dev *dev,
 	hn_rndis_get_linkspeed(hv);
 
 	link = (struct rte_eth_link) {
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_autoneg = ETH_LINK_SPEED_FIXED,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = RTE_ETH_LINK_SPEED_FIXED,
 		.link_speed = hv->link_speed / 10000,
 	};
 
 	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (old.link_status == link.link_status)
 		return 0;
 
 	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
-		     (link.link_status == ETH_LINK_UP) ? "up" : "down");
+		     (link.link_status == RTE_ETH_LINK_UP) ? "up" : "down");
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -263,14 +263,14 @@ static int hn_dev_info_get(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	int rc;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = HN_MAX_XFER_LEN;
 	dev_info->max_mac_addrs  = 1;
 
 	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
 	dev_info->flow_type_rss_offloads = hv->rss_offloads;
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 
 	dev_info->max_rx_queues = hv->max_queues;
 	dev_info->max_tx_queues = hv->max_queues;
@@ -306,8 +306,8 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -346,8 +346,8 @@ static int hn_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -362,17 +362,17 @@ static void hn_rss_hash_init(struct hn_data *hv,
 	/* Convert from DPDK RSS hash flags to NDIS hash flags */
 	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
 
-	if (rss_conf->rss_hf & ETH_RSS_IPV4)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 		hv->rss_hash |= NDIS_HASH_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 		hv->rss_hash |=  NDIS_HASH_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX)
 		hv->rss_hash |=  NDIS_HASH_IPV6_EX;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
 
 	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
@@ -427,22 +427,22 @@ static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	if (hv->rss_hash & NDIS_HASH_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_IPV4;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_IPV6;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_EX;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	return 0;
 }
@@ -686,8 +686,8 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
 	if (unsupported) {
@@ -705,7 +705,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	hv->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	err = hn_rndis_conf_offload(hv, txmode->offloads,
 				    rxmode->offloads);
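
hn_rss_hash_conf_get() above is what serves the application-side query; a
sketch of reading back the negotiated hash types under the new names (helper
name illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

/* Print which of the renamed RSS hash types the port has active;
 * rss_key is left NULL because only the hash-type bits are wanted. */
static void
print_rss_types(uint16_t port_id)
{
	struct rte_eth_rss_conf conf = { .rss_key = NULL };

	if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) != 0)
		return;
	printf("ipv4:%d tcp4:%d udp4:%d ipv6:%d\n",
	       !!(conf.rss_hf & RTE_ETH_RSS_IPV4),
	       !!(conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP),
	       !!(conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP),
	       !!(conf.rss_hf & RTE_ETH_RSS_IPV6));
}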
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index e3f7e636d731..cacb30385404 100644
--- a/drivers/net/netvsc/hn_rndis.c
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -710,15 +710,15 @@ hn_rndis_query_rsscaps(struct hn_data *hv,
 
 	hv->rss_offloads = 0;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
-		hv->rss_offloads |= ETH_RSS_IPV4
-			| ETH_RSS_NONFRAG_IPV4_TCP
-			| ETH_RSS_NONFRAG_IPV4_UDP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV4
+			| RTE_ETH_RSS_NONFRAG_IPV4_TCP
+			| RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
-		hv->rss_offloads |= ETH_RSS_IPV6
-			| ETH_RSS_NONFRAG_IPV6_TCP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6
+			| RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
-		hv->rss_offloads |= ETH_RSS_IPV6_EX
-			| ETH_RSS_IPV6_TCP_EX;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6_EX
+			| RTE_ETH_RSS_IPV6_TCP_EX;
 
 	/* Commit! */
 	*rxr_cnt0 = rxr_cnt;
@@ -800,7 +800,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -812,7 +812,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
 		    == NDIS_RXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
@@ -826,7 +826,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
 			params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -839,7 +839,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
 			params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
@@ -851,21 +851,21 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
 		    == NDIS_TXCSUM_CAP_IP4)
 			params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
 			goto unsupported;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
 			params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
 			params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
 		else
@@ -907,41 +907,41 @@ int hn_rndis_get_offload(struct hn_data *hv,
 		return error;
 	}
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
 	    == HN_NDIS_TXCSUM_CAP_IP4)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
 	    == HN_NDIS_TXCSUM_CAP_TCP4 &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
 	    == HN_NDIS_TXCSUM_CAP_TCP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 
 	if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
 	    (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
 	    == HN_NDIS_LSOV2_CAP_IP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				    DEV_RX_OFFLOAD_RSS_HASH;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 
 	return 0;
 }
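
The capability bits built up here feed the usual application-side probe-then-enable flow, which is unchanged by this rename apart from the RTE_ETH_ prefix. A minimal sketch of that flow (hypothetical helper, not part of this patch):

#include <rte_ethdev.h>

/* Enable checksum/TSO offloads only if the PMD advertised them. */
static int
enable_supported_offloads(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;

	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;

	return 0;
}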
diff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c
index 7e91d5984740..c2ff1c999869 100644
--- a/drivers/net/nfb/nfb_ethdev.c
+++ b/drivers/net/nfb/nfb_ethdev.c
@@ -200,7 +200,7 @@ nfb_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -268,26 +268,26 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 
 	status.speed = MAC_SPEED_UNKNOWN;
 
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_status = ETH_LINK_DOWN;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_SPEED_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_SPEED_FIXED;
 
 	if (internals->rxmac[0] != NULL) {
 		nc_rxmac_read_status(internals->rxmac[0], &status);
 
 		switch (status.speed) {
 		case MAC_SPEED_10G:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case MAC_SPEED_40G:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case MAC_SPEED_100G:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -296,7 +296,7 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 		nc_rxmac_read_status(internals->rxmac[i], &status);
 
 		if (status.enabled && status.link_up) {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			break;
 		}
 	}
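
On the consumer side the renamed link constants are drop-in replacements; a hedged example of reading the link state this function reports (assuming a configured port_id):

struct rte_eth_link link;

if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
    link.link_status == RTE_ETH_LINK_UP)
	printf("port %u: %u Mbps %s\n", port_id, link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
	       "full-duplex" : "half-duplex");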
diff --git a/drivers/net/nfb/nfb_rx.c b/drivers/net/nfb/nfb_rx.c
index d6d4ba9663c6..f19e9834848b 100644
--- a/drivers/net/nfb/nfb_rx.c
+++ b/drivers/net/nfb/nfb_rx.c
@@ -42,7 +42,7 @@ nfb_check_timestamp(struct rte_devargs *devargs)
 	}
 	/* Timestamps are enabled when there is
 	 * key-value pair: enable_timestamp=1
-	 * TODO: timestamp should be enabled with DEV_RX_OFFLOAD_TIMESTAMP
+	 * TODO: timestamp should be enabled with RTE_ETH_RX_OFFLOAD_TIMESTAMP
 	 */
 	if (rte_kvargs_process(kvlist, TIMESTAMP_ARG,
 		timestamp_check_handler, NULL) < 0) {
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 1b4bc33593fb..dff7cfd3d6f9 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -160,8 +160,8 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
@@ -170,7 +170,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX mode */
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
 	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
 		PMD_INIT_LOG(INFO, "RSS not supported");
 		return -EINVAL;
@@ -359,20 +359,20 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		hw->mtu = rxmode->max_rx_pkt_len;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
 	/* L2 broadcast */
@@ -384,13 +384,13 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* TX checksum offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -398,7 +398,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	}
 
 	/* RX gather */
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
 	return ctrl;
@@ -486,14 +486,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	int ret;
 
 	static const uint32_t ls_to_ethtool[] = {
-		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
-		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
-		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
-		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
-		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
-		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
+		[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
+		[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
+		[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
+		[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
+		[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
 	};
 
 	PMD_DRV_LOG(DEBUG, "Link update");
@@ -505,15 +505,15 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 			 NFP_NET_CFG_STS_LINK_RATE_MASK;
 
 	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		link.link_speed = ls_to_ethtool[nn_link_status];
 
@@ -702,26 +702,26 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_UDP_CKSUM |
-					     DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-					     DEV_TX_OFFLOAD_UDP_CKSUM |
-					     DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -758,25 +758,25 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	};
 
 	/* All NFP devices support jumbo frames */
-	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-		dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-						   ETH_RSS_NONFRAG_IPV4_TCP |
-						   ETH_RSS_NONFRAG_IPV4_UDP |
-						   ETH_RSS_IPV6 |
-						   ETH_RSS_NONFRAG_IPV6_TCP |
-						   ETH_RSS_NONFRAG_IPV6_UDP;
+		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+						   RTE_ETH_RSS_IPV6 |
+						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 	}
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -847,7 +847,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 	if (link.link_status)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 			    dev->data->port_id, link.link_speed,
-			    link.link_duplex == ETH_LINK_FULL_DUPLEX
+			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 			    ? "full-duplex" : "half-duplex");
 	else
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -964,9 +964,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if ((uint32_t)mtu > RTE_ETHER_MTU)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
@@ -990,12 +990,12 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	new_ctrl = 0;
 
 	/* Enable vlan strip if it is not configured yet */
-	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
 
 	/* Disable vlan strip just if it is configured */
-	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
@@ -1035,8 +1035,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1116,8 +1116,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1155,22 +1155,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1240,22 +1240,22 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	/* Propagate current RSS hash functions to caller */
 	rss_conf->rss_hf = rss_hf;
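
The idx/shift arithmetic used in the RETA loops above is the generic way to address rte_eth_rss_reta_entry64 groups: entry i lives in group i / RTE_ETH_RETA_GROUP_SIZE at position i % RTE_ETH_RETA_GROUP_SIZE, and with RTE_ETH_RSS_RETA_SIZE_128 that is 128 / 64 = 2 groups. A small sketch under those assumptions (hypothetical helper):

/* Fetch RETA entry 'i' from an array of 64-entry groups. */
static inline uint16_t
reta_entry_get(const struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t i)
{
	uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* which group */
	uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE; /* slot within it */

	return reta_conf[idx].reta[shift];
}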
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 534a38c14f94..7a6a963bf6cc 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -140,7 +140,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index b697b55865cc..ac960328c7de 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -101,7 +101,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3b5c6615adfa..fc76b84b5b66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -409,7 +409,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	dev->data->dev_link.link_status = link_up;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 		negotiate = true;
 
 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
@@ -418,11 +418,11 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 
 	allowed_speeds = 0;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_1G;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_100M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_10M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
 
 	if (*link_speeds & ~allowed_speeds) {
 		PMD_INIT_LOG(ERR, "Invalid link setting");
@@ -430,14 +430,14 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = hw->mac.default_speeds;
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= NGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= NGBE_LINK_SPEED_100M_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= NGBE_LINK_SPEED_10M_FULL;
 	}
 
@@ -653,8 +653,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_10M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -682,11 +682,11 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			~ETH_LINK_SPEED_AUTONEG);
+			~RTE_ETH_LINK_SPEED_AUTONEG);
 
 	hw->mac.get_link_status = true;
 
@@ -699,8 +699,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -708,27 +708,27 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 
 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case NGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 
 	case NGBE_LINK_SPEED_10M_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		lan_speed = 0;
 		break;
 
 	case NGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		lan_speed = 1;
 		break;
 
 	case NGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		lan_speed = 2;
 		break;
 	}
@@ -912,11 +912,11 @@ ngbe_dev_link_status_print(struct rte_eth_dev *dev)
 
 	rte_eth_linkstatus_get(dev, &link);
 
-	if (link.link_status == ETH_LINK_UP) {
+	if (link.link_status == RTE_ETH_LINK_UP) {
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -956,7 +956,7 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
 		ngbe_dev_link_update(dev, 0);
 
 		/* likely to up */
-		if (link.link_status != ETH_LINK_UP)
+		if (link.link_status != RTE_ETH_LINK_UP)
 			/* handle it 1 sec later, wait it being stable */
 			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
 		/* likely to down */
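
The link_speeds checks above rely on RTE_ETH_LINK_SPEED_AUTONEG being defined as 0: autonegotiation is requested exactly when no fixed-speed bit is set, and RTE_ETH_LINK_SPEED_FIXED pins an explicit speed. A minimal sketch of that convention (hypothetical helper):

/* Autoneg is in effect when no bit beyond AUTONEG (0) is set. */
static inline int
link_speeds_is_autoneg(uint32_t link_speeds)
{
	return !(link_speeds & ~RTE_ETH_LINK_SPEED_AUTONEG);
}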
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 508bafc12a14..df4ddb3b40e2 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -61,16 +61,16 @@ struct pmd_internals {
 	rte_spinlock_t rss_lock;
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[40];                /**< 40-byte hash key. */
 };
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
@@ -189,7 +189,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return -EINVAL;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -199,7 +199,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return 0;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -381,9 +381,9 @@ eth_rss_reta_update(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
 		internal->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -406,8 +406,8 @@ eth_rss_reta_query(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
 	}
@@ -538,8 +538,8 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	internals->port_id = eth_dev->data->port_id;
 	rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
-	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
-	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+	internals->flow_type_rss_offloads =  RTE_ETH_RSS_PROTO_MASK;
+	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;
 
 	rte_memcpy(internals->rss_key, default_rss_key, 40);
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 9f4c0503b4d4..947dabdca2c5 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -158,7 +158,7 @@ octeontx_link_status_print(struct rte_eth_dev *eth_dev,
 		octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
 			  (eth_dev->data->port_id),
 			  link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		octeontx_log_info("Port %d: Link Down",
@@ -171,38 +171,38 @@ octeontx_link_status_update(struct octeontx_nic *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	switch (nic->speed) {
 	case OCTEONTX_LINK_SPEED_SGMII:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_XAUI:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RXAUI:
 	case OCTEONTX_LINK_SPEED_10G_R:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case OCTEONTX_LINK_SPEED_QSGMII:
-		link->link_speed = ETH_SPEED_NUM_5G;
+		link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case OCTEONTX_LINK_SPEED_40G_R:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RESERVE1:
 	case OCTEONTX_LINK_SPEED_RESERVE2:
 	default:
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		octeontx_log_err("incorrect link speed %d", nic->speed);
 		break;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -355,20 +355,20 @@ octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= OCCTX_TX_MULTI_SEG_F;
 
 	return flags;
@@ -380,21 +380,21 @@ octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		flags |= OCCTX_RX_MULTI_SEG_F;
 		eth_dev->data->scattered_rx = 1;
 		/* If scatter mode is enabled, TX should also be in multi
 		 * seg mode, else memory leak will occur
 		 */
-		nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	return flags;
@@ -423,18 +423,18 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+		txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		octeontx_log_err("setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -534,13 +534,13 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		octeontx_log_err("Scatter mode is disabled");
 		return -EINVAL;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -553,9 +553,9 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		return rc;
 
 	if (frame_size > OCCTX_L2_MAX_LEN)
-		nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Update max_rx_pkt_len */
 	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -582,7 +582,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
 
 	/* Setup scatter mode if needed by jumbo */
 	if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
-		nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
 		nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
 	}
@@ -854,10 +854,10 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_40G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_40G;
 
 	/* Min/Max MTU supported */
 	dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
@@ -1369,7 +1369,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	nic->ev_ports = 1;
 	nic->print_flag = -1;
 
-	data->dev_link.link_status = ETH_LINK_DOWN;
+	data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	data->dev_started = 0;
 	data->promiscuous = 0;
 	data->all_multicast = 0;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index b73515de37ca..7215039507c3 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -55,24 +55,24 @@
 #define OCCTX_MAX_MTU		(OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
 
 #define OCTEONTX_RX_OFFLOADS		(				   \
-					 DEV_RX_OFFLOAD_CHECKSUM	 | \
-					 DEV_RX_OFFLOAD_SCTP_CKSUM       | \
-					 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_RX_OFFLOAD_SCATTER	         | \
-					 DEV_RX_OFFLOAD_SCATTER		 | \
-					 DEV_RX_OFFLOAD_JUMBO_FRAME	 | \
-					 DEV_RX_OFFLOAD_VLAN_FILTER)
+					 RTE_ETH_RX_OFFLOAD_CHECKSUM	 | \
+					 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER	         | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER		 | \
+					 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	 | \
+					 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 
 #define OCTEONTX_TX_OFFLOADS		(				   \
-					 DEV_TX_OFFLOAD_MBUF_FAST_FREE	 | \
-					 DEV_TX_OFFLOAD_MT_LOCKFREE	 | \
-					 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_IPV4_CKSUM	 | \
-					 DEV_TX_OFFLOAD_TCP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_SCTP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_MULTI_SEGS)
+					 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	 | \
+					 RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	 | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_TCP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 static inline struct octeontx_nic *
 octeontx_pmd_priv(struct rte_eth_dev *dev)
diff --git a/drivers/net/octeontx/octeontx_ethdev_ops.c b/drivers/net/octeontx/octeontx_ethdev_ops.c
index dbe13ce3826b..6ec2b71b0672 100644
--- a/drivers/net/octeontx/octeontx_ethdev_ops.c
+++ b/drivers/net/octeontx/octeontx_ethdev_ops.c
@@ -43,20 +43,20 @@ octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			rc = octeontx_vlan_hw_filter(nic, true);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
 		} else {
 			rc = octeontx_vlan_hw_filter(nic, false);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
 		}
 	}
@@ -139,7 +139,7 @@ octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
 
 	TAILQ_INIT(&nic->vlan_info.fltr_tbl);
 
-	rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	rc = octeontx_dev_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 	if (rc)
 		octeontx_log_err("Failed to set vlan offload rc=%d", rc);
 
@@ -219,13 +219,13 @@ octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
 		return rc;
 
 	if (conf.rx_pause && conf.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (conf.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (conf.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	/* low_water & high_water values are in Bytes */
 	fc_conf->low_water = conf.low_water;
@@ -272,10 +272,10 @@ octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	conf.high_water = fc_conf->high_water;
 	conf.low_water = fc_conf->low_water;
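
The rx_pause/tx_pause mapping repeated in the get/set paths above is symmetric; a sketch of the direction used by the _get callbacks (hypothetical helper):

static enum rte_eth_fc_mode
fc_mode_from_pause(int rx_pause, int tx_pause)
{
	if (rx_pause && tx_pause)
		return RTE_ETH_FC_FULL;
	if (rx_pause)
		return RTE_ETH_FC_RX_PAUSE;
	if (tx_pause)
		return RTE_ETH_FC_TX_PAUSE;
	return RTE_ETH_FC_NONE;
}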
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 75d4cabf2e7c..ebe503438144 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -21,7 +21,7 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 
 	if (otx2_dev_is_vf(dev) ||
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -33,10 +33,10 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 
 	/* TSO not supported for earlier chip revisions */
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
-		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	return capa;
 }
 
@@ -66,8 +66,8 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	req->npa_func = otx2_npa_pf_func_get();
 	req->sso_func = otx2_sso_pf_func_get();
 	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
@@ -373,7 +373,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 
 	aq->rq.sso_ena = 0;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		aq->rq.ipsech_ena = 1;
 
 	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
@@ -664,7 +664,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev)) {
 		rc = otx2_nix_raw_clock_tsc_conv(dev);
 		if (rc) {
@@ -691,7 +691,7 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -706,29 +706,29 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-			(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+			(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_QINQ_STRIP))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
 		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	if (!dev->ptype_disable)
@@ -767,43 +767,43 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -913,8 +913,8 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 		/* Setting up the rx[tx]_offload_flags due to change
 		 * in rx[tx]_offloads.
@@ -1857,21 +1857,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
 
 	if (otx2_dev_is_Ax(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		otx2_err("Outer IP and SCTP checksum unsupported");
 		goto fail_configure;
 	}
@@ -2244,7 +2244,7 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled in PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev))
 		otx2_nix_timesync_enable(eth_dev);
 	else
@@ -2573,8 +2573,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	rc = otx2_eth_sec_ctx_create(eth_dev);
 	if (rc)
 		goto free_mac_addrs;
-	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 
 	/* Initialize rte-flow */
 	rc = otx2_flow_init(dev);
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 7871e3d30bda..04e43b63c192 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -117,44 +117,44 @@
 #define CQ_TIMER_THRESH_DEFAULT	0xAULL /* ~1usec i.e (0xA * 100nsec) */
 #define CQ_TIMER_THRESH_MAX     255
 
-#define NIX_RSS_L3_L4_SRC_DST  (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
-				| ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define NIX_RSS_L3_L4_SRC_DST  (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY \
+				| RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
-#define NIX_RSS_OFFLOAD		(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
-				 ETH_RSS_TCP | ETH_RSS_SCTP | \
-				 ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
-				 NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | \
-				 ETH_RSS_C_VLAN)
+#define NIX_RSS_OFFLOAD		(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |\
+				 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | \
+				 RTE_ETH_RSS_TUNNEL | RTE_ETH_RSS_L2_PAYLOAD | \
+				 NIX_RSS_L3_L4_SRC_DST | RTE_ETH_RSS_LEVEL_MASK | \
+				 RTE_ETH_RSS_C_VLAN)
 
 #define NIX_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE	| \
-	DEV_TX_OFFLOAD_MT_LOCKFREE	| \
-	DEV_TX_OFFLOAD_VLAN_INSERT	| \
-	DEV_TX_OFFLOAD_QINQ_INSERT	| \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
-	DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_CKSUM	| \
-	DEV_TX_OFFLOAD_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_TSO		| \
-	DEV_TX_OFFLOAD_VXLAN_TNL_TSO    | \
-	DEV_TX_OFFLOAD_GENEVE_TNL_TSO   | \
-	DEV_TX_OFFLOAD_GRE_TNL_TSO	| \
-	DEV_TX_OFFLOAD_MULTI_SEGS	| \
-	DEV_TX_OFFLOAD_IPV4_CKSUM)
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	| \
+	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	| \
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_QINQ_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO		| \
+	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    | \
+	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   | \
+	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	| \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS	| \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define NIX_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM		| \
-	DEV_RX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_RX_OFFLOAD_SCATTER		| \
-	DEV_RX_OFFLOAD_JUMBO_FRAME	| \
-	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_RX_OFFLOAD_VLAN_STRIP	| \
-	DEV_RX_OFFLOAD_VLAN_FILTER	| \
-	DEV_RX_OFFLOAD_QINQ_STRIP	| \
-	DEV_RX_OFFLOAD_TIMESTAMP	| \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM		| \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_RX_OFFLOAD_SCATTER		| \
+	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	| \
+	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER	| \
+	RTE_ETH_RX_OFFLOAD_QINQ_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP	| \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NIX_DEFAULT_RSS_CTX_GROUP  0
 #define NIX_DEFAULT_RSS_MCAM_IDX  -1
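
Capability masks like NIX_RSS_OFFLOAD above end up in dev_info.flow_type_rss_offloads, which applications are expected to intersect with their requested hash types. A hedged configuration sketch (dev_info assumed filled by rte_eth_dev_info_get()):

struct rte_eth_conf conf = {
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
	.rx_adv_conf.rss_conf = {
		/* Request IP/TCP/UDP hashing, trimmed to what the PMD offers. */
		.rss_hf = (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
			  dev_info.flow_type_rss_offloads,
	},
};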
diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c b/drivers/net/octeontx2/otx2_ethdev_devargs.c
index 83f905315b38..60bf6c3f5f05 100644
--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -49,12 +49,12 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
-		val = ETH_RSS_RETA_SIZE_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
-		val = ETH_RSS_RETA_SIZE_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
-		val = ETH_RSS_RETA_SIZE_256;
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
+		val = RTE_ETH_RSS_RETA_SIZE_64;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
+		val = RTE_ETH_RSS_RETA_SIZE_128;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
+		val = RTE_ETH_RSS_RETA_SIZE_256;
 	else
 		val = NIX_RSS_RETA_SIZE;
 
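So the devargs value is rounded up to the next supported table size: for example, reta_size=100 resolves to RTE_ETH_RSS_RETA_SIZE_128 (128 entries), while anything above RTE_ETH_RSS_RETA_SIZE_256 falls back to the NIX_RSS_RETA_SIZE default.
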
diff --git a/drivers/net/octeontx2/otx2_ethdev_ops.c b/drivers/net/octeontx2/otx2_ethdev_ops.c
index 5a4501208e9e..41761085e156 100644
--- a/drivers/net/octeontx2/otx2_ethdev_ops.c
+++ b/drivers/net/octeontx2/otx2_ethdev_ops.c
@@ -29,11 +29,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER))
 		return -EINVAL;
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -59,9 +59,9 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		return rc;
 
 	if (frame_size > NIX_L2_MAX_LEN)
-		dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Update max_rx_pkt_len */
 	data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -590,17 +590,17 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	};
 
 	/* Auto negotiation disabled */
-	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
-		devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+		devinfo->speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G;
 
 		/* 50G and 100G to be supported for board version C0
 		 * and above.
 		 */
 		if (!otx2_dev_is_Ax(dev))
-			devinfo->speed_capa |= ETH_LINK_SPEED_50G |
-					       ETH_LINK_SPEED_100G;
+			devinfo->speed_capa |= RTE_ETH_LINK_SPEED_50G |
+					       RTE_ETH_LINK_SPEED_100G;
 	}
 
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
diff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c
index c2a36883cbf2..e1654ef5b284 100644
--- a/drivers/net/octeontx2/otx2_ethdev_sec.c
+++ b/drivers/net/octeontx2/otx2_ethdev_sec.c
@@ -890,8 +890,8 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
 			 !RTE_IS_POWER_OF_2(sa_width));
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return 0;
 
 	if (rte_security_dynfield_register() < 0)
@@ -933,8 +933,8 @@ otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
 	uint16_t port = eth_dev->data->port_id;
 	char name[RTE_MEMZONE_NAMESIZE];
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	lookup_mem_sa_tbl_clear(eth_dev);
diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c
index 6df0732189eb..1d0fe4e950d4 100644
--- a/drivers/net/octeontx2/otx2_flow.c
+++ b/drivers/net/octeontx2/otx2_flow.c
@@ -625,7 +625,7 @@ otx2_flow_create(struct rte_eth_dev *dev,
 		goto err_exit;
 	}
 
-	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rc = flow_update_sec_tt(dev, actions);
 		if (rc != 0) {
 			rte_flow_error_set(error, EIO,
diff --git a/drivers/net/octeontx2/otx2_flow_ctrl.c b/drivers/net/octeontx2/otx2_flow_ctrl.c
index 76bf48100183..071740de86a7 100644
--- a/drivers/net/octeontx2/otx2_flow_ctrl.c
+++ b/drivers/net/octeontx2/otx2_flow_ctrl.c
@@ -54,7 +54,7 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 	int rc;
 
 	if (otx2_dev_is_lbk(dev)) {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		return 0;
 	}
 
@@ -66,13 +66,13 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		goto done;
 
 	if (rsp->rx_pause && rsp->tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rsp->rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (rsp->tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 done:
 	return rc;
@@ -159,10 +159,10 @@ otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -212,11 +212,11 @@ otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (otx2_dev_is_Ax(dev) &&
 	    (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
-	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_conf.mode == RTE_ETH_FC_FULL || fc_conf.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_conf.mode =
-				(fc_conf.mode == RTE_FC_FULL ||
-				fc_conf.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_conf.mode == RTE_ETH_FC_FULL ||
+				fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
@@ -234,7 +234,7 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		return 0;
 
 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -242,10 +242,10 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
diff --git a/drivers/net/octeontx2/otx2_flow_parse.c b/drivers/net/octeontx2/otx2_flow_parse.c
index 63a33142a579..3fe6727f1d2a 100644
--- a/drivers/net/octeontx2/otx2_flow_parse.c
+++ b/drivers/net/octeontx2/otx2_flow_parse.c
@@ -852,7 +852,7 @@ parse_rss_action(struct rte_eth_dev *dev,
 					  attr, "No support of RSS in egress");
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  act, "multi-queue mode is disabled");
@@ -1188,7 +1188,7 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev,
 		 *FLOW_KEY_ALG index. So, till we update the action with
 		 *flow_key_alg index, set the action to drop.
 		 */
-		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 			flow->npc_action = NIX_RX_ACTIONOP_DROP;
 		else
 			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
diff --git a/drivers/net/octeontx2/otx2_link.c b/drivers/net/octeontx2/otx2_link.c
index 81dd6243b977..8f5d0eed92b6 100644
--- a/drivers/net/octeontx2/otx2_link.c
+++ b/drivers/net/octeontx2/otx2_link.c
@@ -41,7 +41,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		otx2_info("Port %d: Link Up - speed %u Mbps - %s",
 			  (int)(eth_dev->data->port_id),
 			  (uint32_t)link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
@@ -92,7 +92,7 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 
 	eth_link.link_status = link->link_up;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	otx2_dev->speed = link->speed;
@@ -111,10 +111,10 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 static int
 lbk_link_update(struct rte_eth_link *link)
 {
-	link->link_status = ETH_LINK_UP;
-	link->link_speed = ETH_SPEED_NUM_100G;
-	link->link_autoneg = ETH_LINK_FIXED;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = RTE_ETH_LINK_UP;
+	link->link_speed = RTE_ETH_SPEED_NUM_100G;
+	link->link_autoneg = RTE_ETH_LINK_FIXED;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	return 0;
 }
 
@@ -131,7 +131,7 @@ cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
 
 	link->link_status = rsp->link_info.link_up;
 	link->link_speed = rsp->link_info.speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (rsp->link_info.full_duplex)
 		link->link_duplex = rsp->link_info.full_duplex;
@@ -233,22 +233,22 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 
 	/* 50G and 100G to be supported for board version C0 and above */
 	if (!otx2_dev_is_Ax(dev)) {
-		if (link_speeds & ETH_LINK_SPEED_100G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 			link_speed = 100000;
-		if (link_speeds & ETH_LINK_SPEED_50G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 			link_speed = 50000;
 	}
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed = 40000;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed = 25000;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed = 20000;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed = 10000;
-	if (link_speeds & ETH_LINK_SPEED_5G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 		link_speed = 5000;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed = 1000;
 
 	return link_speed;
@@ -257,11 +257,11 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 static inline uint8_t
 nix_parse_eth_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-			(link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+			(link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 int
@@ -279,7 +279,7 @@ otx2_apply_link_speed(struct rte_eth_dev *eth_dev)
 	cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
 	if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
 		cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
-		cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+		cfg.an = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		return cgx_change_mode(dev, &cfg);
 	}
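
For the speed flags above, note they are a bitmask; a minimal sketch of requesting a fixed 25G link via rte_eth_dev_configure() (queue counts and the helper name are placeholders):

    /* RTE_ETH_LINK_SPEED_* values are OR-able flags for
     * dev_conf.link_speeds (RTE_ETH_LINK_SPEED_FIXED disables
     * autonegotiation), unlike the plain Mbps RTE_ETH_SPEED_NUM_*
     * numbers used in struct rte_eth_link.
     */
    static int
    configure_fixed_25g(uint16_t port_id)
    {
        struct rte_eth_conf conf = {
            .link_speeds = RTE_ETH_LINK_SPEED_FIXED |
                           RTE_ETH_LINK_SPEED_25G,
        };

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
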
diff --git a/drivers/net/octeontx2/otx2_mcast.c b/drivers/net/octeontx2/otx2_mcast.c
index f84aa1bf570c..b9c63ad3bc21 100644
--- a/drivers/net/octeontx2/otx2_mcast.c
+++ b/drivers/net/octeontx2/otx2_mcast.c
@@ -100,7 +100,7 @@ nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
 
 		action = NIX_RX_ACTIONOP_UCAST;
 
-		if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+		if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 			action = NIX_RX_ACTIONOP_RSS;
 			action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 		}
diff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c
index 91e5c0f6bd11..abb213058792 100644
--- a/drivers/net/octeontx2/otx2_ptp.c
+++ b/drivers/net/octeontx2/otx2_ptp.c
@@ -250,7 +250,7 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	/* System time should be already on by default */
 	nix_start_timecounters(eth_dev);
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
@@ -287,7 +287,7 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 	if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
 		return -EINVAL;
 
-	dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
 
diff --git a/drivers/net/octeontx2/otx2_rss.c b/drivers/net/octeontx2/otx2_rss.c
index 7dbe5f69ae65..68cef1caa394 100644
--- a/drivers/net/octeontx2/otx2_rss.c
+++ b/drivers/net/octeontx2/otx2_rss.c
@@ -85,8 +85,8 @@ otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				rss->ind_tbl[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -118,8 +118,8 @@ otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = rss->ind_tbl[j];
 	}
@@ -178,23 +178,23 @@ rss_get_key(struct otx2_eth_dev *dev, uint8_t *key)
 }
 
 #define RSS_IPV4_ENABLE ( \
-			  ETH_RSS_IPV4 | \
-			  ETH_RSS_FRAG_IPV4 | \
-			  ETH_RSS_NONFRAG_IPV4_UDP | \
-			  ETH_RSS_NONFRAG_IPV4_TCP | \
-			  ETH_RSS_NONFRAG_IPV4_SCTP)
+			  RTE_ETH_RSS_IPV4 | \
+			  RTE_ETH_RSS_FRAG_IPV4 | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE ( \
-			  ETH_RSS_IPV6 | \
-			  ETH_RSS_FRAG_IPV6 | \
-			  ETH_RSS_NONFRAG_IPV6_UDP | \
-			  ETH_RSS_NONFRAG_IPV6_TCP | \
-			  ETH_RSS_NONFRAG_IPV6_SCTP)
+			  RTE_ETH_RSS_IPV6 | \
+			  RTE_ETH_RSS_FRAG_IPV6 | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE ( \
-			     ETH_RSS_IPV6_EX | \
-			     ETH_RSS_IPV6_TCP_EX | \
-			     ETH_RSS_IPV6_UDP_EX)
+			     RTE_ETH_RSS_IPV6_EX | \
+			     RTE_ETH_RSS_IPV6_TCP_EX | \
+			     RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS   3
 
@@ -233,24 +233,24 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->rss_info.nix_rss = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -259,34 +259,34 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -343,7 +343,7 @@ otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 		otx2_nix_rss_set_key(dev, rss_conf->rss_key,
 				     (uint32_t)rss_conf->rss_key_len);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
@@ -390,7 +390,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	int rc;
 
 	/* Skip further configuration if selected mode is not RSS */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS || !qcnt)
 		return 0;
 
 	/* Update default RSS key and cfg */
@@ -408,7 +408,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	}
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
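
The RETA arithmetic above is unchanged apart from the rename; a minimal application-side sketch of filling an indirection table with RTE_ETH_RETA_GROUP_SIZE (the helper name is illustrative and reta_conf is assumed zeroed by the caller):

    static void
    fill_default_reta(struct rte_eth_rss_reta_entry64 *reta_conf,
                      uint16_t reta_size, uint16_t nb_queues)
    {
        uint16_t i;

        for (i = 0; i < reta_size; i++) {
            uint16_t grp = i / RTE_ETH_RETA_GROUP_SIZE;
            uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

            /* Mark the entry valid and spread queues round-robin */
            reta_conf[grp].mask |= 1ULL << pos;
            reta_conf[grp].reta[pos] = i % nb_queues;
        }
    }
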
diff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c
index ffeade5952dc..986902287b67 100644
--- a/drivers/net/octeontx2/otx2_rx.c
+++ b/drivers/net/octeontx2/otx2_rx.c
@@ -414,12 +414,12 @@ NIX_RX_FASTPATH_MODES
 	/* For PTP enabled, scalar rx function should be chosen as most of the
 	 * PTP apps are implemented to rx burst 1 pkt.
 	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		pick_rx_func(eth_dev, nix_eth_rx_burst);
 	else
 		pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 
 	/* Copy multi seg version with no offload for tear down sequence */
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index ff299f00b913..c60190074926 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -1070,7 +1070,7 @@ NIX_TX_FASTPATH_MODES
 	else
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 
 	rte_mb();
diff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c
index f5161e17a16d..cce643b7b51d 100644
--- a/drivers/net/octeontx2/otx2_vlan.c
+++ b/drivers/net/octeontx2/otx2_vlan.c
@@ -50,7 +50,7 @@ nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
 
 	action = NIX_RX_ACTIONOP_UCAST;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		action = NIX_RX_ACTIONOP_RSS;
 		action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 	}
@@ -99,7 +99,7 @@ nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
 	 * Take offset from LA since in case of untagged packet,
 	 * lbptr is zero.
 	 */
-	if (type == ETH_VLAN_TYPE_OUTER) {
+	if (type == RTE_ETH_VLAN_TYPE_OUTER) {
 		vtag_action.act.vtag0_def = vtag_index;
 		vtag_action.act.vtag0_lid = NPC_LID_LA;
 		vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
@@ -413,7 +413,7 @@ nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
 		if (vlan->strip_on ||
 		    (vlan->qinq_on && !vlan->qinq_before_def)) {
 			if (eth_dev->data->dev_conf.rxmode.mq_mode ==
-								ETH_MQ_RX_RSS)
+								RTE_ETH_MQ_RX_RSS)
 				vlan->def_rx_mcam_ent.action |=
 							NIX_RX_ACTIONOP_RSS;
 			else
@@ -717,48 +717,48 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
 	rxmode = &eth_dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, true);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, false);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, true, 0);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, false, 0);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
 		if (!dev->vlan_info.qinq_on) {
-			offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, true);
 			if (rc)
 				goto done;
 		}
 	} else {
 		if (dev->vlan_info.qinq_on) {
-			offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, false);
 			if (rc)
 				goto done;
 		}
 	}
 
-	if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-			DEV_RX_OFFLOAD_QINQ_STRIP)) {
+	if (offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) {
 		dev->rx_offloads |= offloads;
 		dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 		otx2_eth_set_rx_function(eth_dev);
@@ -780,7 +780,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
 
 	tpid_cfg->tpid = tpid;
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
 	else
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
@@ -789,7 +789,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	if (rc)
 		return rc;
 
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		dev->vlan_info.outer_vlan_tpid = tpid;
 	else
 		dev->vlan_info.inner_vlan_tpid = tpid;
@@ -864,7 +864,7 @@ otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev,       uint16_t vlan_id, int on)
 		vlan->outer_vlan_idx = 0;
 	}
 
-	rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+	rc = nix_vlan_handle_default_tx_entry(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					      vtag_index, on);
 	if (rc < 0) {
 		printf("Default tx entry failed with rc %d\n", rc);
@@ -986,12 +986,12 @@ otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
 	} else {
 		/* Reinstall all mcam entries now if filter offload is set */
 		if (eth_dev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			nix_vlan_reinstall_vlan_filters(eth_dev);
 	}
 
 	mask =
-	    ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	    RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	rc = otx2_nix_vlan_offload_set(eth_dev, mask);
 	if (rc) {
 		otx2_err("Failed to set vlan offload rc=%d", rc);
diff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c
index a243683d61d3..7bfa6098e230 100644
--- a/drivers/net/octeontx_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeontx_ep/otx_ep_ethdev.c
@@ -33,15 +33,15 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	otx_epvf = OTX_EP_DEV(eth_dev);
 
-	devinfo->speed_capa = ETH_LINK_SPEED_10G;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
 
 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
 	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
-	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
-	devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
-	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+	devinfo->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
+	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
 
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.c b/drivers/net/octeontx_ep/otx_ep_rxtx.c
index a7d433547e36..77593111f141 100644
--- a/drivers/net/octeontx_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeontx_ep/otx_ep_rxtx.c
@@ -563,7 +563,7 @@ otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
@@ -697,7 +697,7 @@ otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)
@@ -954,13 +954,13 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
 	droq_pkt->l4_len = hdr_lens.l4_len;
 
 	if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
 
 	if (droq_pkt->nb_segs > 1 &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index a8774b7a432a..13d18e875444 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -135,10 +135,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
@@ -655,7 +655,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -710,7 +710,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
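
Reading the link state back uses the same renamed constants the PMDs above initialize; a minimal sketch (printf() implies <stdio.h>; the helper name is illustrative):

    static void
    print_link(uint16_t port_id)
    {
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
            return;

        if (link.link_status == RTE_ETH_LINK_UP)
            printf("port %u: %u Mbps, %s\n", port_id,
                   link.link_speed,
                   link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                   "full-duplex" : "half-duplex");
    }
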
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index feec4d10a26e..a74f27bf8158 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -22,15 +22,15 @@ struct pfe_vdev_init_params {
 static struct pfe *g_pfe;
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -613,9 +613,9 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	}
 
 	link.link_status = lstatus;
-	link.link_speed = ETH_LINK_SPEED_1G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_speed = RTE_ETH_LINK_SPEED_1G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	pfe_eth_atomic_write_link_status(dev, &link);
 
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 6667c2d7ab6d..511742c6a1b3 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -65,8 +65,8 @@ typedef u32 offsize_t;      /* In DWORDS !!! */
 struct eth_phy_cfg {
 /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
 	u32 speed;
-#define ETH_SPEED_AUTONEG   0
-#define ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
+#define RTE_ETH_SPEED_AUTONEG   0
+#define RTE_ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
 
 	u32 pause;      /* bitmask */
 #define ETH_PAUSE_NONE		0x0
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 323d46e6ebb2..81c35358dc57 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -342,9 +342,9 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
 	}
 
 	use_tx_offload = !!(tx_offloads &
-			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
-			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
-			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+			    (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
 
 	if (use_tx_offload) {
 		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
@@ -1002,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			(void)qede_vlan_stripping(eth_dev, 1);
 		else
 			(void)qede_vlan_stripping(eth_dev, 0);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN filtering kicks in when a VLAN is added */
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			qede_vlan_filter_set(eth_dev, 0, 1);
 		} else {
 			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1022,7 +1022,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 				 * enabled
 				 */
 				eth_dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_VLAN_FILTER;
+						RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			} else {
 				qede_vlan_filter_set(eth_dev, 0, 0);
 			}
@@ -1069,11 +1069,11 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	/* Configure default RETA */
 	memset(reta_conf, 0, sizeof(reta_conf));
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-		id = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		id = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		q = i % QEDE_RSS_COUNT(eth_dev);
 		reta_conf[id].reta[pos] = q;
 	}
@@ -1112,12 +1112,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure TPA parameters */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (qede_enable_tpa(eth_dev, true))
 			return -EINVAL;
 		/* Enable scatter mode for LRO */
 		if (!eth_dev->data->scattered_rx)
-			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	}
 
 	/* Start queues */
@@ -1132,7 +1132,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	 * Also, we would like to retain similar behavior in PF case, so we
 	 * don't do PF/VF specific check here.
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1272,8 +1272,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
@@ -1291,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_NOTICE(edev, false,
 			  "Invalid devargs supplied, requested change will not take effect\n");
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+	if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+	      rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
 		DP_ERR(edev, "Unsupported multi-queue mode\n");
 		return -ENOTSUP;
 	}
@@ -1313,12 +1313,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	}
 
 	/* If jumbo enabled adjust MTU */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		eth_dev->data->mtu =
 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
 			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 
 	if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1327,8 +1327,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = eth_dev->data->mtu;
 
 	/* Enable VLAN offloads by default */
-	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-					     ETH_VLAN_FILTER_MASK);
+	ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK  |
+					     RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1391,35 +1391,35 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_RX_OFFLOAD_UDP_CKSUM	|
-				     DEV_RX_OFFLOAD_TCP_CKSUM	|
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_LRO	|
-				     DEV_RX_OFFLOAD_KEEP_CRC    |
-				     DEV_RX_OFFLOAD_SCATTER	|
-				     DEV_RX_OFFLOAD_JUMBO_FRAME |
-				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP  |
-				     DEV_RX_OFFLOAD_RSS_HASH);
+	dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO	|
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+				     RTE_ETH_RX_OFFLOAD_SCATTER	|
+				     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+				     RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
 	 * to both at port and queue levels.
 	 */
-	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
-				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_TX_OFFLOAD_UDP_CKSUM	|
-				     DEV_TX_OFFLOAD_TCP_CKSUM	|
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_MULTI_SEGS  |
-				     DEV_TX_OFFLOAD_TCP_TSO	|
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT	|
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO	|
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	};
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1431,17 +1431,17 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-		speed_cap |= ETH_LINK_SPEED_1G;
+		speed_cap |= RTE_ETH_LINK_SPEED_1G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-		speed_cap |= ETH_LINK_SPEED_10G;
+		speed_cap |= RTE_ETH_LINK_SPEED_10G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-		speed_cap |= ETH_LINK_SPEED_25G;
+		speed_cap |= RTE_ETH_LINK_SPEED_25G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-		speed_cap |= ETH_LINK_SPEED_40G;
+		speed_cap |= RTE_ETH_LINK_SPEED_40G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-		speed_cap |= ETH_LINK_SPEED_50G;
+		speed_cap |= RTE_ETH_LINK_SPEED_50G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-		speed_cap |= ETH_LINK_SPEED_100G;
+		speed_cap |= RTE_ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
 
 	return 0;
@@ -1468,10 +1468,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	/* Link Mode */
 	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
-		link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case QEDE_DUPLEX_FULL:
-		link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case QEDE_DUPLEX_UNKNOWN:
 	default:
@@ -1480,11 +1480,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	/* AN */
 	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+			     RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
 		link.link_speed, link.link_duplex,
@@ -2019,12 +2019,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
-	if (fc_conf->mode == RTE_FC_FULL)
+	if (fc_conf->mode == RTE_ETH_FC_FULL)
 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
 					QED_LINK_PAUSE_RX_ENABLE);
-	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
 	params.link_up = true;
@@ -2048,13 +2048,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
 					 QED_LINK_PAUSE_TX_ENABLE))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2095,14 +2095,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
 	*rss_caps = 0;
-	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -2228,7 +2228,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	uint8_t entry;
 	int rc = 0;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
 		       reta_size);
 		return -EINVAL;
@@ -2252,8 +2252,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 
 	for_each_hwfn(edev, i) {
 		for (j = 0; j < reta_size; j++) {
-			idx = j / RTE_RETA_GROUP_SIZE;
-			shift = j % RTE_RETA_GROUP_SIZE;
+			idx = j / RTE_ETH_RETA_GROUP_SIZE;
+			shift = j % RTE_ETH_RETA_GROUP_SIZE;
 			if (reta_conf[idx].mask & (1ULL << shift)) {
 				entry = reta_conf[idx].reta[shift];
 				fid = entry * edev->num_hwfns + i;
@@ -2289,15 +2289,15 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	uint16_t i, idx, shift;
 	uint8_t entry;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported\n",
 		       reta_size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift)) {
 			entry = qdev->rss_ind_table[i];
 			reta_conf[idx].reta[shift] = entry;
@@ -2369,9 +2369,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		}
 	}
 	if (frame_size > QEDE_ETH_MAX_LEN)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev->data->dev_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (!dev->data->dev_started && restart) {
 		qede_dev_start(dev);
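
Applications are expected to check these capability bits before requesting an offload; a minimal sketch with the renamed RTE_ETH_RX_OFFLOAD_* names (the helper name is made up):

    static int
    request_lro_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
    {
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;

        /* Only ask for TCP LRO when the port reports support */
        if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
            conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
        return 0;
    }
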
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index c756594bfc4b..ceb47c17d0d6 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -144,7 +144,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
 
 	/* check FDIR modes */
 	switch (fdir->mode) {
@@ -542,7 +542,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -570,7 +570,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 					ECORE_TUNN_CLSS_MAC_VLAN, false);
 
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -622,7 +622,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for VXLAN was already configured\n",
@@ -659,7 +659,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
 		qdev->vxlan.udp_port = udp_port;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for GENEVE was already configured\n",
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 298f4e3e4273..144dfef269f3 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -249,7 +249,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 	/* cache align the mbuf size to simplfy rx_buf_size calculation */
 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)	||
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	||
 	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
 		if (!dev->data->scattered_rx) {
 			DP_INFO(edev, "Forcing scatter-gather mode\n");
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index c9334448c887..15112b83f4f7 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -73,14 +73,14 @@
 #define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
 #define QEDE_ETH_MAX_LEN	(RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
 
-#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
-				 ETH_RSS_NONFRAG_IPV4_TCP	|\
-				 ETH_RSS_NONFRAG_IPV4_UDP	|\
-				 ETH_RSS_IPV6			|\
-				 ETH_RSS_NONFRAG_IPV6_TCP	|\
-				 ETH_RSS_NONFRAG_IPV6_UDP	|\
-				 ETH_RSS_VXLAN			|\
-				 ETH_RSS_GENEVE)
+#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4			|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_UDP	|\
+				 RTE_ETH_RSS_IPV6			|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_UDP	|\
+				 RTE_ETH_RSS_VXLAN			|\
+				 RTE_ETH_RSS_GENEVE)
 
 #define QEDE_RXTX_MAX(qdev) \
 	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 1faf38a714cf..8d1ef5fb22bc 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -56,10 +56,10 @@ struct pmd_internals {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
@@ -102,7 +102,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -110,21 +110,21 @@ static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	dev->data->dev_started = 0;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_down(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_up(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -163,8 +163,8 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 274a98e228e4..d93f9d2418b9 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -81,13 +81,13 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 {
 	uint32_t phy_caps = 0;
 
-	if (~speeds & ETH_LINK_SPEED_FIXED) {
+	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		phy_caps |= (1 << EFX_PHY_CAP_AN);
 		/*
 		 * If no speeds are specified in the mask, any supported
 		 * may be negotiated
 		 */
-		if (speeds == ETH_LINK_SPEED_AUTONEG)
+		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 			phy_caps |=
 				(1 << EFX_PHY_CAP_1000FDX) |
 				(1 << EFX_PHY_CAP_10000FDX) |
@@ -96,17 +96,17 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 				(1 << EFX_PHY_CAP_50000FDX) |
 				(1 << EFX_PHY_CAP_100000FDX);
 	}
-	if (speeds & ETH_LINK_SPEED_1G)
+	if (speeds & RTE_ETH_LINK_SPEED_1G)
 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
-	if (speeds & ETH_LINK_SPEED_10G)
+	if (speeds & RTE_ETH_LINK_SPEED_10G)
 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
-	if (speeds & ETH_LINK_SPEED_25G)
+	if (speeds & RTE_ETH_LINK_SPEED_25G)
 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
-	if (speeds & ETH_LINK_SPEED_40G)
+	if (speeds & RTE_ETH_LINK_SPEED_40G)
 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
-	if (speeds & ETH_LINK_SPEED_50G)
+	if (speeds & RTE_ETH_LINK_SPEED_50G)
 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
-	if (speeds & ETH_LINK_SPEED_100G)
+	if (speeds & RTE_ETH_LINK_SPEED_100G)
 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
 
 	return phy_caps;
@@ -337,10 +337,10 @@ sfc_set_fw_subvariant(struct sfc_adapter *sa)
 			tx_offloads |= txq_info->offloads;
 	}
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
 	else
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
@@ -827,7 +827,7 @@ sfc_attach(struct sfc_adapter *sa)
 	sa->priv.shared->tunnel_encaps =
 		encp->enc_tunnel_encapsulations_supported;
 
-	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
 			  encp->enc_tso_v3_enabled;
 		if (!sa->tso)
@@ -836,8 +836,8 @@ sfc_attach(struct sfc_adapter *sa)
 
 	if (sa->tso &&
 	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
-	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
+	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
 		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
 				encp->enc_tso_v3_enabled;
 		if (!sa->tso_encap)
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index d4cb96881cd2..ca8774ad0950 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -916,11 +916,11 @@ struct sfc_dp_rx sfc_ef100_rx = {
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_RX_OFFLOAD_SCATTER |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_SCATTER |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.get_dev_info		= sfc_ef100_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
 	.qcreate		= sfc_ef100_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index 522e9a0d3470..7c91ee3fcb53 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -942,16 +942,16 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_MULTI_SEGS |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 991329e86f01..9ea207cca163 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -746,8 +746,8 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
 				  SFC_DP_RX_FEAT_FLOW_MARK,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.queue_offload_capa	= 0,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
 	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 49a7d4fb42fd..9aaabd30eee6 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -819,10 +819,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ed43adb4ca5c..e7da4608bcb0 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -958,9 +958,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+	if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1125,14 +1125,14 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
@@ -1152,11 +1152,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 2db0d000c3ad..33f800c46e59 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -102,19 +102,19 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = sa->sriov.num_vfs;
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->max_rx_queues = sa->rxq_max;
 	dev_info->max_tx_queues = sa->txq_max;
@@ -142,8 +142,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
 				    dev_info->tx_queue_offload_capa;
 
-	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->default_txconf.offloads |= txq_offloads_def;
 
@@ -912,16 +912,16 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	switch (link_fc) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case EFX_FCNTL_RESPOND:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case EFX_FCNTL_GENERATE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	default:
 		sfc_err(sa, "%s: unexpected flow control value %#x",
@@ -952,16 +952,16 @@ sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		fcntl = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		fcntl = EFX_FCNTL_RESPOND;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		fcntl = EFX_FCNTL_GENERATE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
 		break;
 	default:
@@ -1070,7 +1070,7 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	 */
 	if (mtu > RTE_ETHER_MTU) {
 		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	}
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
@@ -1247,7 +1247,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
-		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		qinfo->scattered_rx = 1;
 	}
 	qinfo->nb_desc = rxq_info->entries;
@@ -1472,9 +1472,9 @@ static efx_tunnel_protocol_t
 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
 {
 	switch (rte_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		return EFX_TUNNEL_PROTOCOL_VXLAN;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		return EFX_TUNNEL_PROTOCOL_GENEVE;
 	default:
 		return EFX_TUNNEL_NPROTOS;
@@ -1601,7 +1601,7 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	/*
 	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
-	 * hence, conversion is done here to derive a correct set of ETH_RSS
+	 * hence, conversion is done here to derive a correct set of RTE_ETH_RSS
 	 * flags which corresponds to the active EFX configuration stored
 	 * locally in 'sfc_adapter' and kept up-to-date
 	 */
@@ -1727,8 +1727,8 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp = entry / RTE_RETA_GROUP_SIZE;
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 
 		if ((reta_conf[grp].mask >> grp_idx) & 1)
 			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
@@ -1777,10 +1777,10 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 		struct rte_eth_rss_reta_entry64 *grp;
 
-		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+		grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
 
 		if (grp->mask & (1ull << grp_idx)) {
 			if (grp->reta[grp_idx] >= rss->channels) {
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 4f5993a68d23..dc2cdfea13c4 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -390,7 +390,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vlan *spec = NULL;
 	const struct rte_flow_item_vlan *mask = NULL;
 	const struct rte_flow_item_vlan supp_mask = {
-		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
 		.inner_type = RTE_BE16(0xffff),
 	};
 
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index adb2b2cb8175..dea5272a79bc 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -387,7 +387,7 @@ sfc_port_configure(struct sfc_adapter *sa)
 
 	sfc_log_init(sa, "entry");
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
 		port->pdu = rxmode->max_rx_pkt_len;
 	else
 		port->pdu = EFX_MAC_PDU(dev_data->mtu);
@@ -577,66 +577,66 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
 
 	memset(link_info, 0, sizeof(*link_info));
 	if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
-		link_info->link_status = ETH_LINK_DOWN;
+		link_info->link_status = RTE_ETH_LINK_DOWN;
 	else
-		link_info->link_status = ETH_LINK_UP;
+		link_info->link_status = RTE_ETH_LINK_UP;
 
 	switch (link_mode) {
 	case EFX_LINK_10HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_10FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_100FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_1000HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_1000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_10000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_25000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_25G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_25G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_40000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_40G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_40G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_50000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_50G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_50G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	default:
 		SFC_ASSERT(B_FALSE);
 		/* FALLTHROUGH */
 	case EFX_LINK_UNKNOWN:
 	case EFX_LINK_DOWN:
-		link_info->link_speed  = ETH_SPEED_NUM_NONE;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_NONE;
 		link_info->link_duplex = 0;
 		break;
 	}
 
-	link_info->link_autoneg = ETH_LINK_AUTONEG;
+	link_info->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 int
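
For reference, this is the application-side view of the link fields that
sfc_port_link_mode_to_info() populates above, using the renamed constants.
The sketch is not part of the patch; print_link and port_id are assumed
names.

#include <stdio.h>

#include <rte_ethdev.h>

/* Report link state using the renamed RTE_ETH_LINK_* constants. */
static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_DOWN) {
		printf("port %u: link down\n", port_id);
		return;
	}
	printf("port %u: %u Mbps, %s-duplex, %s\n", port_id,
	       link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			"full" : "half",
	       link.link_autoneg == RTE_ETH_LINK_AUTONEG ?
			"autonegotiated" : "fixed");
}
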
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 280e8a61f9e0..a83b47a8d111 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -647,9 +647,9 @@ struct sfc_dp_rx sfc_efx_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
 	},
 	.features		= SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
 	.qcreate		= sfc_efx_rx_qcreate,
 	.qdestroy		= sfc_efx_rx_qdestroy,
@@ -930,7 +930,7 @@ sfc_rx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (encp->enc_tunnel_encapsulations_supported == 0)
-		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	return ~no_caps;
 }
@@ -940,7 +940,7 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
 {
 	uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
 
-	caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	caps |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	return caps & sfc_rx_get_offload_mask(sa);
 }
@@ -1141,7 +1141,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 
 	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
 				  encp->enc_rx_prefix_size,
-				  (offloads & DEV_RX_OFFLOAD_SCATTER),
+				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
 				  encp->enc_rx_scatter_max,
 				  &error)) {
 		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
@@ -1167,15 +1167,15 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags |=
-		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
 	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
-	     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
 
-	if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
 
 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
@@ -1205,7 +1205,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	rxq_info->refill_mb_pool = mb_pool;
 
 	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
-	    (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
 	else
 		rxq_info->rxq_flags = 0;
@@ -1301,19 +1301,19 @@ sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
  * Mapping between RTE RSS hash functions and their EFX counterparts.
  */
 static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
-	{ ETH_RSS_NONFRAG_IPV4_TCP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
 	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
 	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
-	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV4, 2TUPLE) },
-	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
-	  ETH_RSS_IPV6_EX,
+	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+	  RTE_ETH_RSS_IPV6_EX,
 	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV6, 2TUPLE) }
 };
@@ -1633,10 +1633,10 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	int rc = 0;
 
 	switch (rxmode->mq_mode) {
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/* No special checks are required */
 		break;
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
 			sfc_err(sa, "RSS is not available");
 			rc = EINVAL;
@@ -1653,16 +1653,16 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	 * so unsupported offloads cannot be added as the result of
 	 * below check.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
-	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 	}
 
 	return rc;
@@ -1808,7 +1808,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	}
 
 configure_rss:
-	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
 			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
 	if (rss->channels > 0) {
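
The mq_mode and offload checks in sfc_rx_check_mode() above pair with an
application that actually requests RSS. A minimal sketch with the renamed
flags, not part of the patch: configure_rss and its parameters are assumed
names, and the rss_hf choice is only illustrative.

#include <rte_ethdev.h>

/* Request RSS across all Rx queues with the renamed flags; unsupported
 * rss_hf bits are masked out first so rte_eth_dev_configure() does not
 * reject the request. */
static int
configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL, /* let the PMD pick a key */
				.rss_hf = RTE_ETH_RSS_IP |
					  RTE_ETH_RSS_TCP |
					  RTE_ETH_RSS_UDP,
			},
		},
	};
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
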
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 49b239f4d261..359acc71a47f 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -54,23 +54,23 @@ sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (!encp->enc_hw_tx_insert_vlan_enabled)
-		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (!encp->enc_tunnel_encapsulations_supported)
-		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	if (!sa->tso)
-		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	return ~no_caps;
 }
@@ -114,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -309,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;
 
 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -515,23 +515,23 @@ sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 	if (rc != 0)
 		goto fail_ev_qstart;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_IPV4;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -862,9 +862,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/*
 		 * Here VLAN TCI is expected to be zero if no
-		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
@@ -1228,13 +1228,13 @@ struct sfc_dp_tx sfc_efx_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
 	},
 	.features		= 0,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO,
 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
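
sfc_tx_qcheck_conf() above rejects TCP checksum offload without UDP
checksum offload and vice versa, so an application should request the pair
together. A hedged sketch of that, assuming request_tx_csum as a helper
name; it is not part of the patch.

#include <errno.h>

#include <rte_ethdev.h>

/* Enable IPv4 + L4 Tx checksum offload before rte_eth_dev_configure().
 * TCP and UDP checksum are requested as a pair, matching the
 * sfc_tx_qcheck_conf() constraint above. */
static int
request_tx_csum(uint16_t port_id, struct rte_eth_conf *conf)
{
	const uint64_t wanted = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if ((dev_info.tx_offload_capa & wanted) != wanted)
		return -ENOTSUP;

	conf->txmode.offloads |= wanted;
	return 0;
}
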
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b3b55b9035b1..3ef33818a9e0 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -173,7 +173,7 @@ pmd_dev_start(struct rte_eth_dev *dev)
 		return status;
 
 	/* Link UP */
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -184,7 +184,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
 	struct pmd_internals *p = dev->data->dev_private;
 
 	/* Link DOWN */
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	/* Firmware */
 	softnic_pipeline_disable_all(p);
@@ -386,10 +386,10 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
 
 	/* dev->data */
 	dev->data->dev_private = dev_private;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	dev->data->mac_addrs = &eth_addr;
 	dev->data->promiscuous = 1;
 	dev->data->numa_node = params->cpu_id;
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 7416a6b1b816..255444a4181d 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1042,7 +1042,7 @@ static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_dev_data *data = dev->data;
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
 		data->scattered_rx = 1;
 	} else {
@@ -1064,11 +1064,11 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = internals->max_rx_queues;
 	dev_info->max_tx_queues = internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = 0;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->tx_queue_offload_capa = 0;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1204,10 +1204,10 @@ eth_link_update(struct rte_eth_dev *dev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_speed = ETH_SPEED_NUM_100G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_status = ETH_LINK_UP;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	rte_eth_linkstatus_set(dev, &link);
 	return 0;
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index c515de3bf71d..ad5980ef5280 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -70,16 +70,16 @@
 
 #define TAP_IOV_DEFAULT_MAX 1024
 
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER |	\
-			DEV_RX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_RX_OFFLOAD_UDP_CKSUM |	\
-			DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER |	\
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS |	\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_TX_OFFLOAD_UDP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |	\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 static int tap_devices_count;
 
@@ -97,10 +97,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;	/* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -433,7 +433,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		len = readv(process_private->rxq_fds[rxq->queue_id],
 			*rxq->iovecs,
-			1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+			1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
 			     rxq->nb_rx_desc : 1));
 		if (len < (int)sizeof(struct tun_pi))
 			break;
@@ -489,7 +489,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		seg->next = NULL;
 		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
 						      RTE_PTYPE_ALL_MASK);
-		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 			tap_verify_csum(mbuf);
 
 		/* account for the receive frame */
@@ -866,7 +866,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -876,7 +876,7 @@ tap_link_set_up(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
@@ -956,30 +956,30 @@ tap_dev_speed_capa(void)
 	uint32_t speed = pmd_link.link_speed;
 	uint32_t capa = 0;
 
-	if (speed >= ETH_SPEED_NUM_10M)
-		capa |= ETH_LINK_SPEED_10M;
-	if (speed >= ETH_SPEED_NUM_100M)
-		capa |= ETH_LINK_SPEED_100M;
-	if (speed >= ETH_SPEED_NUM_1G)
-		capa |= ETH_LINK_SPEED_1G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_2_5G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_5G;
-	if (speed >= ETH_SPEED_NUM_10G)
-		capa |= ETH_LINK_SPEED_10G;
-	if (speed >= ETH_SPEED_NUM_20G)
-		capa |= ETH_LINK_SPEED_20G;
-	if (speed >= ETH_SPEED_NUM_25G)
-		capa |= ETH_LINK_SPEED_25G;
-	if (speed >= ETH_SPEED_NUM_40G)
-		capa |= ETH_LINK_SPEED_40G;
-	if (speed >= ETH_SPEED_NUM_50G)
-		capa |= ETH_LINK_SPEED_50G;
-	if (speed >= ETH_SPEED_NUM_56G)
-		capa |= ETH_LINK_SPEED_56G;
-	if (speed >= ETH_SPEED_NUM_100G)
-		capa |= ETH_LINK_SPEED_100G;
+	if (speed >= RTE_ETH_SPEED_NUM_10M)
+		capa |= RTE_ETH_LINK_SPEED_10M;
+	if (speed >= RTE_ETH_SPEED_NUM_100M)
+		capa |= RTE_ETH_LINK_SPEED_100M;
+	if (speed >= RTE_ETH_SPEED_NUM_1G)
+		capa |= RTE_ETH_LINK_SPEED_1G;
+	if (speed >= RTE_ETH_SPEED_NUM_2_5G)
+		capa |= RTE_ETH_LINK_SPEED_2_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_10G)
+		capa |= RTE_ETH_LINK_SPEED_10G;
+	if (speed >= RTE_ETH_SPEED_NUM_20G)
+		capa |= RTE_ETH_LINK_SPEED_20G;
+	if (speed >= RTE_ETH_SPEED_NUM_25G)
+		capa |= RTE_ETH_LINK_SPEED_25G;
+	if (speed >= RTE_ETH_SPEED_NUM_40G)
+		capa |= RTE_ETH_LINK_SPEED_40G;
+	if (speed >= RTE_ETH_SPEED_NUM_50G)
+		capa |= RTE_ETH_LINK_SPEED_50G;
+	if (speed >= RTE_ETH_SPEED_NUM_56G)
+		capa |= RTE_ETH_LINK_SPEED_56G;
+	if (speed >= RTE_ETH_SPEED_NUM_100G)
+		capa |= RTE_ETH_LINK_SPEED_100G;
 
 	return capa;
 }
@@ -1196,15 +1196,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
 		if (!(ifr.ifr_flags & IFF_UP) ||
 		    !(ifr.ifr_flags & IFF_RUNNING)) {
-			dev_link->link_status = ETH_LINK_DOWN;
+			dev_link->link_status = RTE_ETH_LINK_DOWN;
 			return 0;
 		}
 	}
 	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
 	dev_link->link_status =
 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-		 ETH_LINK_UP :
-		 ETH_LINK_DOWN);
+		 RTE_ETH_LINK_UP :
+		 RTE_ETH_LINK_DOWN);
 	return 0;
 }
 
@@ -1391,7 +1391,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 	int ret;
 
 	/* initialize GSO context */
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (!pmd->gso_ctx_mp) {
 		/*
 		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1606,9 +1606,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->csum = !!(offloads &
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM));
+			(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
@@ -1765,7 +1765,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	fc_conf->mode = RTE_FC_NONE;
+	fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1773,7 +1773,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	if (fc_conf->mode != RTE_FC_NONE)
+	if (fc_conf->mode != RTE_ETH_FC_NONE)
 		return -ENOTSUP;
 	return 0;
 }
@@ -2267,7 +2267,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
 			}
 		}
 	}
-	pmd_link.link_speed = ETH_SPEED_NUM_10G;
+	pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
 	TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2441,7 +2441,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 		return 0;
 	}
 
-	speed = ETH_SPEED_NUM_10G;
+	speed = RTE_ETH_SPEED_NUM_10G;
 
 	/* use tap%d which causes kernel to choose next available */
 	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
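
tap_dev_speed_capa() above folds a single link speed into a capability
bitmask. The matching consumer side, decoding dev_info.speed_capa with the
renamed bits, might look like the following sketch; it is not part of the
patch and all names are illustrative.

#include <stdio.h>

#include <rte_ethdev.h>

/* Decode the speed_capa bitmask built by tap_dev_speed_capa() above. */
static void
print_speed_capa(uint16_t port_id)
{
	static const struct {
		uint32_t bit;
		const char *name;
	} speeds[] = {
		{ RTE_ETH_LINK_SPEED_10M,  "10M"  },
		{ RTE_ETH_LINK_SPEED_100M, "100M" },
		{ RTE_ETH_LINK_SPEED_1G,   "1G"   },
		{ RTE_ETH_LINK_SPEED_10G,  "10G"  },
		{ RTE_ETH_LINK_SPEED_100G, "100G" },
	};
	struct rte_eth_dev_info dev_info;
	unsigned int i;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	if (dev_info.speed_capa & RTE_ETH_LINK_SPEED_FIXED)
		printf("port %u: autonegotiation not supported\n", port_id);
	for (i = 0; i < RTE_DIM(speeds); i++)
		if (dev_info.speed_capa & speeds[i].bit)
			printf("port %u: %s capable\n", port_id,
			       speeds[i].name);
}
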
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 176e7180bdaa..48c151cf6b68 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -13,7 +13,7 @@
 #define TAP_RSS_HASH_KEY_SIZE 40
 
 /* Supported RSS */
-#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define TAP_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))
 
 /* hashed fields for RSS */
 enum hash_field {
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index fc1844ddfce1..8d02fbae7274 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -61,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	if (nic->duplex == NICVF_HALF_DUPLEX)
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -134,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
 			nicvf_link_status_update(nic, &link);
-			if (link.link_status == ETH_LINK_UP)
+			if (link.link_status == RTE_ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
@@ -177,9 +177,9 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 
 	if (frame_size > NIC_HW_L2_MAX_LEN)
-		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
 		return -EINVAL;
@@ -404,35 +404,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
 	uint64_t nic_rss = 0;
 
-	if (ethdev_rss & ETH_RSS_IPV4)
+	if (ethdev_rss & RTE_ETH_RSS_IPV4)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_IPV6)
+	if (ethdev_rss & RTE_ETH_RSS_IPV6)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-		if (ethdev_rss & ETH_RSS_VXLAN)
+		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 			nic_rss |= RSS_TUN_VXLAN_ENA;
 
-		if (ethdev_rss & ETH_RSS_GENEVE)
+		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 			nic_rss |= RSS_TUN_GENEVE_ENA;
 
-		if (ethdev_rss & ETH_RSS_NVGRE)
+		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 			nic_rss |= RSS_TUN_NVGRE_ENA;
 	}
 
@@ -445,28 +445,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
 	uint64_t ethdev_rss = 0;
 
 	if (nic_rss & RSS_IP_ENA)
-		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-				ETH_RSS_NONFRAG_IPV6_TCP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-				ETH_RSS_NONFRAG_IPV6_UDP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-		ethdev_rss |= ETH_RSS_PORT;
+		ethdev_rss |= RTE_ETH_RSS_PORT;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
 		if (nic_rss & RSS_TUN_VXLAN_ENA)
-			ethdev_rss |= ETH_RSS_VXLAN;
+			ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
 		if (nic_rss & RSS_TUN_GENEVE_ENA)
-			ethdev_rss |= ETH_RSS_GENEVE;
+			ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
 		if (nic_rss & RSS_TUN_NVGRE_ENA)
-			ethdev_rss |= ETH_RSS_NVGRE;
+			ethdev_rss |= RTE_ETH_RSS_NVGRE;
 	}
 	return ethdev_rss;
 }
@@ -493,8 +493,8 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = tbl[j];
 	}
@@ -523,8 +523,8 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				tbl[j] = reta_conf[i].reta[j];
 	}
@@ -821,9 +821,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
 		    dev->data->nb_rx_queues,
 		    dev->data->dev_conf.lpbk_mode, rsshf);
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		ret = nicvf_rss_term(nic);
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
 	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -884,7 +884,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -1007,7 +1007,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->offloads = offloads;
 
-	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1397,11 +1397,11 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+				 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
@@ -1430,10 +1430,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
-			DEV_TX_OFFLOAD_UDP_CKSUM          |
-			DEV_TX_OFFLOAD_TCP_CKSUM,
+		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM          |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 	};
 
 	return 0;
@@ -1597,8 +1597,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1727,11 +1727,11 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU based on max_rx_pkt_len or default */
-	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
+	mtu = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME ?
 		dev->data->dev_conf.rxmode.max_rx_pkt_len
 			-  RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
 
@@ -1914,8 +1914,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!rte_eal_has_hugepages()) {
 		PMD_INIT_LOG(INFO, "Huge page is not configured");
@@ -1927,8 +1927,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
@@ -1938,7 +1938,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -1973,7 +1973,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic->offload_cksum = 1;
 
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -2050,8 +2050,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			nicvf_vlan_hw_strip(nic, true);
 		else
 			nicvf_vlan_hw_strip(nic, false);
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index b8dd905d0bd6..c1876bb9e1b7 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -16,33 +16,33 @@
 #define NICVF_UNKNOWN_DUPLEX		0xff
 
 #define NICVF_RSS_OFFLOAD_PASS1 ( \
-	ETH_RSS_PORT | \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_PORT | \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define NICVF_RSS_OFFLOAD_TUNNEL ( \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
 
 #define NICVF_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
-	DEV_TX_OFFLOAD_UDP_CKSUM        | \
-	DEV_TX_OFFLOAD_TCP_CKSUM        | \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define NICVF_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM    | \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_JUMBO_FRAME | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM    | \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
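
The RTE_ETH_VLAN_*_MASK bits passed to nicvf_vlan_offload_config() above
(and to the txgbe handlers below) are what ethdev hands to a driver's
vlan_offload_set callback; applications drive them through
rte_eth_dev_set_vlan_offload() with the renamed *_OFFLOAD flags. A minimal
sketch, not part of the patch, with enable_vlan_strip as an assumed name:

#include <rte_ethdev.h>

/* Turn on VLAN stripping at runtime. ethdev translates the changed bits
 * into the RTE_ETH_VLAN_*_MASK values seen by the driver callbacks. */
static int
enable_vlan_strip(uint16_t port_id)
{
	int offload = rte_eth_dev_get_vlan_offload(port_id);

	if (offload < 0)
		return offload;
	offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, offload);
}
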
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 006399468841..c6e8a14ddf3f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -997,7 +997,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 			!(rxcfg & TXGBE_RXCFG_VLAN);
 		rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1032,7 +1032,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (vlan_ext) {
 			wr32m(hw, TXGBE_VLANCTL,
 				TXGBE_VLANCTL_TPID_MASK,
@@ -1052,7 +1052,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				TXGBE_TAGTPID_LSB(tpid));
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (vlan_ext) {
 			/* Only the high 16-bits is valid */
 			wr32m(hw, TXGBE_EXTAG,
@@ -1137,10 +1137,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -1239,7 +1239,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			txgbe_vlan_strip_queue_set(dev, i, 1);
 		else
 			txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1253,17 +1253,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct txgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -1274,25 +1274,25 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		txgbe_vlan_hw_strip_config(dev);
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			txgbe_vlan_hw_filter_enable(dev);
 		else
 			txgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			txgbe_vlan_hw_extend_enable(dev);
 		else
 			txgbe_vlan_hw_extend_disable(dev);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			txgbe_qinq_hw_strip_enable(dev);
 		else
 			txgbe_qinq_hw_strip_disable(dev);
@@ -1330,10 +1330,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -1356,18 +1356,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1377,13 +1377,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode =
-				ETH_MQ_RX_VMDQ_ONLY;
+				RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -1392,13 +1392,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
 			dev->data->dev_conf.txmode.mq_mode =
-				ETH_MQ_TX_VMDQ_ONLY;
+				RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -1413,13 +1413,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdq+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1428,15 +1428,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1445,39 +1445,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -1494,8 +1494,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = txgbe_check_mq_mode(dev);
@@ -1637,7 +1637,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	 *    - half duplex (checked afterwards for valid speeds)
 	 *    - fixed speed: TODO implement
 	 */
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -1704,15 +1704,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		txgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1773,8 +1773,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	if (err)
 		goto error;
 
-	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
 	if (*link_speeds & ~allowed_speeds) {
@@ -1783,20 +1783,20 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
 	}
 
@@ -2611,7 +2611,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2644,11 +2644,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_desc_lim = tx_desc_lim;
 
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -2705,10 +2705,10 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	hw->mac.get_link_status = true;
 
@@ -2722,8 +2722,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -2742,34 +2742,34 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	}
 
 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case TXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case TXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -2994,7 +2994,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3225,13 +3225,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3363,16 +3363,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3404,16 +3404,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3593,12 +3593,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
 		}
@@ -3622,15 +3622,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= TXGBE_POOLETHCTL_UTA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= TXGBE_POOLETHCTL_MCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= TXGBE_POOLETHCTL_UCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= TXGBE_POOLETHCTL_BCA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= TXGBE_POOLETHCTL_MCP;
 
 	return new_val;
@@ -4281,15 +4281,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = TXGBE_INCVAL_100;
 		shift = TXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = TXGBE_INCVAL_1GB;
 		shift = TXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = TXGBE_INCVAL_10GB;
 		shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4645,7 +4645,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -4656,7 +4656,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -4680,9 +4680,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4695,7 +4695,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4925,7 +4925,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -4956,7 +4956,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -4996,7 +4996,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5004,7 +5004,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5012,7 +5012,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5020,7 +5020,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5052,7 +5052,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5062,7 +5062,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5072,7 +5072,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5082,7 +5082,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 3021933965c8..75a9e2580e27 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -56,15 +56,15 @@
 #define TXGBE_5TUPLE_MIN_PRI            1
 
 #define TXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
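
For out-of-tree code tracking this rename, both spellings stay usable
until the compatibility macros are removed, so conversion can happen
incrementally. A minimal sketch of a version-gated shim (the cut-over
release used in the guard is an assumption, not something this patch
defines):

    /* Hypothetical shim: provide the new names when building against
     * a DPDK release that predates this rename. */
    #include <rte_version.h>
    #include <rte_ethdev.h>

    #if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
    #define RTE_ETH_RSS_IPV4             ETH_RSS_IPV4
    #define RTE_ETH_RSS_NONFRAG_IPV4_TCP ETH_RSS_NONFRAG_IPV4_TCP
    #define RTE_ETH_RSS_NONFRAG_IPV4_UDP ETH_RSS_NONFRAG_IPV4_UDP
    #endif
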
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 18ed94bd277b..05773cb20786 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -491,14 +491,14 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -579,22 +579,22 @@ txgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -652,8 +652,8 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
 	txgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -896,10 +896,10 @@ txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports hw strip feature; others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			txgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
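
Since VLAN stripping is tracked per queue here (the loop above derives
the per-queue state from rxq->offloads), applications can request it at
queue granularity as well as per port. A sketch, with the descriptor
count and queue id chosen arbitrarily:

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    /* Sketch: request VLAN stripping on a single Rx queue at setup. */
    static int
    setup_rxq_with_vlan_strip(uint16_t port_id, struct rte_mempool *mp)
    {
        struct rte_eth_rxconf rxconf = {
            .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
        };

        return rte_eth_rx_queue_setup(port_id, 0, 512,
                rte_eth_dev_socket_id(port_id), &rxconf, mp);
    }
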
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 8abb86228608..e303d87176ed 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -102,22 +102,22 @@ txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf,
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
 		     uint32_t *fdirctrl, uint32_t *flex)
 {
 	*fdirctrl = 0;
 	*flex = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
 		break;
@@ -521,15 +521,15 @@ txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
@@ -564,15 +564,15 @@ txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_signature_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index eae400b14176..6d7fd1842843 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1215,7 +1215,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index ccd747973ba2..445733f3ba46 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -372,7 +372,7 @@ txgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -380,7 +380,7 @@ txgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -611,11 +611,11 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -634,7 +634,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= TXGBE_SECRXCTL_CRCSTRIP;
 	wr32(hw, TXGBE_SECRXCTL, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
 		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
 		if (reg != 0) {
@@ -642,7 +642,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
 		reg = rd32(hw, TXGBE_SECTXCTL);
 		if (reg != TXGBE_SECTXCTL_STFWD) {
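
The two SECURITY offload bits checked throughout this file are what an
application sets to opt into inline IPsec. A sketch of the application
side, honoring the same restrictions txgbe_crypto_enable_ipsec()
enforces (the helper name is illustrative):

    #include <rte_ethdev.h>

    /* Illustrative helper: request inline IPsec both ways and clear
     * the offloads txgbe rejects alongside it (LRO and KEEP_CRC). */
    static void
    enable_inline_ipsec(struct rte_eth_conf *conf)
    {
        conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
        conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
        conf->rxmode.offloads &= ~(RTE_ETH_RX_OFFLOAD_TCP_LRO |
                                   RTE_ETH_RX_OFFLOAD_KEEP_CRC);
    }
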
diff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c
index 494d779a3c9d..44f6f103edd2 100644
--- a/drivers/net/txgbe/txgbe_pf.c
+++ b/drivers/net/txgbe/txgbe_pf.c
@@ -103,15 +103,15 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct txgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -258,13 +258,13 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	}
@@ -613,29 +613,29 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &eth_dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = TXGBE_DEV_HW(eth_dev);
 		vmvir = rd32(hw, TXGBE_POOLTAG(vf));
 		vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
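
The pool tiers in txgbe_pf_host_init() read naturally once the enum
values are the pool counts themselves (RTE_ETH_16_POOLS == 16 and so
on). The same tiering as a standalone sketch, with a hypothetical
helper name:

    /* Hypothetical helper: pick the smallest SR-IOV pool layout that
     * accommodates vf_num VFs, returning queues per pool by pointer. */
    static enum rte_eth_nb_pools
    pick_pool_layout(uint16_t vf_num, uint16_t *nb_q_per_pool)
    {
        if (vf_num >= RTE_ETH_32_POOLS) {   /* 32..63 VFs */
            *nb_q_per_pool = 2;
            return RTE_ETH_64_POOLS;
        }
        if (vf_num >= RTE_ETH_16_POOLS) {   /* 16..31 VFs */
            *nb_q_per_pool = 4;
            return RTE_ETH_32_POOLS;
        }
        *nb_q_per_pool = 8;                 /* up to 15 VFs */
        return RTE_ETH_16_POOLS;
    }
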
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 1a261287d1bd..c302d49af728 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1939,7 +1939,7 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
-	return DEV_RX_OFFLOAD_VLAN_STRIP;
+	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 }
 
 uint64_t
@@ -1949,35 +1949,35 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_JUMBO_FRAME |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_RSS_HASH |
-		   DEV_RX_OFFLOAD_SCATTER;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		   RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	if (!txgbe_is_vf(dev))
-		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-			     DEV_RX_OFFLOAD_QINQ_STRIP |
-			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+		offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 
 	/*
 	 * RSC is only supported by PF devices in a non-SR-IOV
 	 * mode.
 	 */
 	if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == txgbe_mac_raptor)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
-	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -2202,32 +2202,32 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_UDP_TSO	   |
-		DEV_TX_OFFLOAD_UDP_TNL_TSO	|
-		DEV_TX_OFFLOAD_IP_TNL_TSO	|
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO	|
-		DEV_TX_OFFLOAD_GRE_TNL_TSO	|
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO	|
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO	|
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_UDP_TSO	   |
+		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (!txgbe_is_vf(dev))
-		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2329,7 +2329,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/* Modification to set tail pointer for virtual function
@@ -2579,7 +2579,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2880,20 +2880,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2910,20 +2910,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		mrqc &= ~TXGBE_RACTL_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_RACTL_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_RACTL_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_RACTL_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2964,39 +2964,39 @@ txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
 			rss_hf = 0;
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		if (mrqc & TXGBE_RACTL_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_RACTL_RSSENA))
 			rss_hf = 0;
 	}
@@ -3026,7 +3026,7 @@ txgbe_rss_configure(struct rte_eth_dev *dev)
 	 */
 	if (adapter->rss_reta_updated == 0) {
 		reta = 0;
-		for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 			if (j == dev->data->nb_rx_queues)
 				j = 0;
 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
@@ -3063,12 +3063,12 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		txgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * split rx buffer up into sections, each for 1 traffic class
@@ -3083,7 +3083,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
 
 		rxpbsize &= (~(0x3FF << 10));
@@ -3091,7 +3091,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 
-	if (num_pools == ETH_16_POOLS) {
+	if (num_pools == RTE_ETH_16_POOLS) {
 		mrqc = TXGBE_PORTCTL_NUMTC_8;
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 	} else {
@@ -3110,7 +3110,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	wr32(hw, TXGBE_POOLCTL, vt_ctl);
 
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3131,7 +3131,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_POOLRXENA(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_ETHADDRIDX, 0);
 	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
@@ -3201,7 +3201,7 @@ txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	/*PF VF Transmit Enable*/
 	wr32(hw, TXGBE_POOLTXENA(0),
 		vmdq_tx_conf->nb_queue_pools ==
-				ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+				RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	txgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3217,12 +3217,12 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3232,7 +3232,7 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3250,12 +3250,12 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3265,7 +3265,7 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3292,7 +3292,7 @@ txgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3319,7 +3319,7 @@ txgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3455,7 +3455,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/*
@@ -3466,8 +3466,8 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/*Configure general VMDQ and DCB RX parameters*/
 		txgbe_vmdq_dcb_configure(dev);
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -3480,7 +3480,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -3491,7 +3491,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -3507,15 +3507,15 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
-			if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -3556,7 +3556,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			wr32(hw, TXGBE_PBRXSIZE(i), 0);
 	}
 	if (config_dcb_tx) {
@@ -3572,7 +3572,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			wr32(hw, TXGBE_PBTXSIZE(i), 0);
 			wr32(hw, TXGBE_PBTXDMATH(i), 0);
 		}
@@ -3614,7 +3614,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/* If the TC count is 8,
@@ -3628,7 +3628,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = txgbe_dcb_pfc_enabled;
 		}
 		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -3699,12 +3699,12 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -3760,7 +3760,7 @@ txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* pool enabling for receive - 64 */
 	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
 
 	/*
@@ -3884,11 +3884,11 @@ txgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
@@ -3911,15 +3911,15 @@ txgbe_config_vf_default(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	default:
@@ -3942,21 +3942,21 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			txgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			txgbe_rss_disable(dev);
@@ -3967,18 +3967,18 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4008,7 +4008,7 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			txgbe_vmdq_tx_hw_configure(hw);
 		else
 			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
@@ -4018,13 +4018,13 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_64;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_32;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_16;
 			break;
 		default:
@@ -4087,10 +4087,10 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4098,22 +4098,22 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
 				    "is disabled");
 		return -EINVAL;
 	}
 
 	rfctl = rd32(hw, TXGBE_PSRCTL);
-	if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~TXGBE_PSRCTL_RSCDIA;
 	else
 		rfctl |= TXGBE_PSRCTL_RSCDIA;
 	wr32(hw, TXGBE_PSRCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set PSRCTL.RSCACK bit */
@@ -4253,7 +4253,7 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 	}
 #endif
 }
@@ -4296,7 +4296,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
 	else
 		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4305,7 +4305,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
 			TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
 	} else {
@@ -4329,7 +4329,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4339,7 +4339,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -4376,11 +4376,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * TXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4395,7 +4395,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = rd32(hw, TXGBE_PSRCTL);
 	rxcsum |= TXGBE_PSRCTL_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= TXGBE_PSRCTL_L4CSUM;
 	else
 		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
@@ -4404,7 +4404,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	if (hw->mac.type == txgbe_mac_raptor) {
 		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4527,8 +4527,8 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 		txgbe_setup_loopback_link_raptor(hw);
 
 #ifdef RTE_LIB_SECURITY
-	if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
-	    (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
+	    (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = txgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -4836,7 +4836,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Set PSR type for VF RSS according to max Rx queue */
 	psrtype = TXGBE_VFPLCFG_PSRL4HDR |
@@ -4888,7 +4888,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		 */
 		wr32(hw, TXGBE_RXCFG(i), srrctl);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (rxmode->max_rx_pkt_len +
 				2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -4897,8 +4897,8 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/*
@@ -5069,7 +5069,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 		if (j == conf->conf.queue_num)
 			j = 0;
 		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b96f58a3f848..27d4c842c0e7 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -309,7 +309,7 @@ struct txgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -392,7 +392,7 @@ struct txgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t            offloads; /* Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
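
Both queue structs store raw RTE_ETH_*_OFFLOAD_* bitmasks, so the
usual application-side pattern is to mask the requested set against
the advertised capabilities before configuring. A sketch (the function
name is illustrative):

    #include <rte_ethdev.h>

    /* Illustrative check: every requested Rx offload bit must be
     * advertised by the PMD before rte_eth_dev_configure() is called. */
    static int
    rx_offloads_supported(uint16_t port_id, uint64_t requested)
    {
        struct rte_eth_dev_info info;

        if (rte_eth_dev_info_get(port_id, &info) != 0)
            return 0;
        return (info.rx_offload_capa & requested) == requested;
    }
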
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 3abe3959eb1a..3171be73d05d 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -118,14 +118,14 @@ txgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -364,10 +364,10 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -381,7 +381,7 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
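
txgbe_tc_nb_get() above encodes the rule that 32 VMDq pools leave room
for 4 traffic classes while 16 pools allow 8. On the configuration
side that corresponds to a request like this sketch (only the fields
relevant to the mapping are shown):

    #include <rte_ethdev.h>

    /* Sketch: VMDq+DCB on Tx with 32 pools, which txgbe maps to
     * RTE_ETH_4_TCS traffic classes. */
    static void
    request_vmdq_dcb_tx(struct rte_eth_conf *conf)
    {
        conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
        conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools =
                RTE_ETH_32_POOLS;
    }
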
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a202931e9aed..778460aab5e1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -125,8 +125,8 @@ static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static struct rte_eth_link pmd_link = {
 		.link_speed = 10000,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN
 };
 
 struct rte_vhost_vring_state {
@@ -823,7 +823,7 @@ new_device(int vid)
 
 	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);
@@ -858,7 +858,7 @@ destroy_device(int vid)
 	rte_atomic32_set(&internal->dev_attached, 0);
 	update_queuing_status(eth_dev);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1124,7 +1124,7 @@ eth_dev_configure(struct rte_eth_dev *dev)
 	if (vhost_driver_setup(dev) < 0)
 		return -1;
 
-	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -1273,9 +1273,9 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = internal->max_queues;
 	dev_info->min_rx_bufsize = 0;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
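
The link fields the vhost PMD fills in above follow the standard
reporting pattern for PMDs. A generic sketch with the renamed
constants (callback name illustrative; the driver header is
ethdev_driver.h in the current tree):

    #include <ethdev_driver.h>

    /* Illustrative link_update callback reporting a fixed 10G link. */
    static int
    pmd_link_update(struct rte_eth_dev *dev,
                    int wait_to_complete __rte_unused)
    {
        struct rte_eth_link link = {
            .link_speed = RTE_ETH_SPEED_NUM_10G,
            .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
            .link_autoneg = RTE_ETH_LINK_FIXED,
            .link_status = RTE_ETH_LINK_UP,
        };

        return rte_eth_linkstatus_set(dev, &link);
    }
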
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index e58085a2c95a..00bbbb2b3537 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -703,7 +703,7 @@ int
 virtio_dev_close(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1763,7 +1763,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
-	if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+	if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
 		if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
 			config = &local_config;
 			virtio_read_dev_config(hw,
@@ -1777,7 +1777,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		}
 	}
 	if (hw->duplex == DUPLEX_UNKNOWN)
-		hw->duplex = ETH_LINK_FULL_DUPLEX;
+		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
 		hw->speed, hw->duplex);
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
@@ -1876,7 +1876,7 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	int vectorized = 0;
 	int ret;
 
@@ -1948,22 +1948,22 @@ static uint32_t
 virtio_dev_speed_capa_get(uint32_t speed)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
@@ -2079,14 +2079,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "configure");
 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Rx multi queue mode %d",
 			rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Tx multi queue mode %d",
 			txmode->mq_mode);
@@ -2104,20 +2104,20 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
@@ -2129,15 +2129,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
-	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
 		PMD_DRV_LOG(ERR,
 			"rx checksum not available on this host");
 		return -ENOTSUP;
 	}
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
 		PMD_DRV_LOG(ERR,
@@ -2149,12 +2149,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
 		virtio_dev_cq_start(dev);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		hw->vlan_strip = 1;
 
-	hw->rx_ol_scatter = (rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 		PMD_DRV_LOG(ERR,
 			    "vlan filtering not available on this host");
@@ -2207,7 +2207,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 				PMD_DRV_LOG(INFO,
 					"disabled packed ring vectorized rx for TCP_LRO enabled");
 				hw->use_vec_rx = 0;
@@ -2234,10 +2234,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_LRO |
-					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
+					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
 				PMD_DRV_LOG(INFO,
 					"disabled split ring vectorized rx for offloading enabled");
 				hw->use_vec_rx = 0;
@@ -2401,7 +2401,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct rte_eth_link link;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 	dev->data->dev_started = 0;
@@ -2440,28 +2440,28 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 	memset(&link, 0, sizeof(link));
 	link.link_duplex = hw->duplex;
 	link.link_speed  = hw->speed;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (!hw->started) {
-		link.link_status = ETH_LINK_DOWN;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
 		virtio_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
 				&status, sizeof(status));
 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
-			link.link_status = ETH_LINK_DOWN;
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			PMD_INIT_LOG(DEBUG, "Port %d is down",
 				     dev->data->port_id);
 		} else {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			PMD_INIT_LOG(DEBUG, "Port %d is up",
 				     dev->data->port_id);
 		}
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2474,8 +2474,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct virtio_hw *hw = dev->data->dev_private;
 	uint64_t offloads = rxmode->offloads;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 
 			PMD_DRV_LOG(NOTICE,
@@ -2485,8 +2485,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK)
-		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
+		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -2508,33 +2508,33 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = hw->max_mtu;
 
 	host_features = VIRTIO_OPS(hw)->get_features(hw);
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 	}
 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 	}
 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	return 0;
 }
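
The virtio hunks above keep the capability-gating idiom intact: the PMD
publishes its offload bits in virtio_dev_info_get(), and an application
enables a flag only when the corresponding bit is advertised. A minimal
sketch of the application side under the new names (the helper name and
its arguments are hypothetical):

	#include <rte_ethdev.h>

	static int
	enable_lro_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
	{
		struct rte_eth_dev_info dev_info;
		int ret;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		/* Request LRO only when the PMD advertises it. */
		if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
			conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

		return 0;
	}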
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 1a3291273a11..825a6adfc2b1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -41,21 +41,21 @@
 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
 
 #define VMXNET3_TX_OFFLOAD_CAP		\
-	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
-	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_TX_OFFLOAD_TCP_TSO |	\
-	 DEV_TX_OFFLOAD_MULTI_SEGS)
+	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
+	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define VMXNET3_RX_OFFLOAD_CAP		\
-	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
-	 DEV_RX_OFFLOAD_VLAN_FILTER |   \
-	 DEV_RX_OFFLOAD_SCATTER |	\
-	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_LRO |	\
-	 DEV_RX_OFFLOAD_JUMBO_FRAME |   \
-	 DEV_RX_OFFLOAD_RSS_HASH)
+	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |	\
+	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
+	 RTE_ETH_RX_OFFLOAD_SCATTER |	\
+	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_LRO |	\
+	 RTE_ETH_RX_OFFLOAD_JUMBO_FRAME |   \
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 int vmxnet3_segs_dynfield_offset = -1;
 
@@ -399,9 +399,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* set the initial link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(eth_dev, &link);
 
 	return 0;
@@ -487,8 +487,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
@@ -548,7 +548,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	hw->queueDescPA = mz->iova;
 	hw->queue_desc_len = (uint16_t)size;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Allocate memory structure for UPT1_RSSConf and configure */
 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
 				      "rss_conf", rte_socket_id(),
@@ -844,15 +844,15 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
 		devRead->misc.maxNumRxSG = 0;
 	}
 
-	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		ret = vmxnet3_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS)
 			return ret;
@@ -864,7 +864,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	}
 
 	ret = vmxnet3_dev_vlan_offload_set(dev,
-			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -931,7 +931,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 	}
 
 	if (VMXNET3_VERSION_GE_4(hw) &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Check for additional RSS  */
 		ret = vmxnet3_v4_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS) {
@@ -1040,9 +1040,9 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(dev, &link);
 
 	hw->adapter_stopped = 1;
@@ -1372,7 +1372,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
 	dev_info->min_mtu = VMXNET3_MIN_MTU;
 	dev_info->max_mtu = VMXNET3_MAX_MTU;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -1454,10 +1454,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (ret & 0x1)
-		link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+		link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1510,7 +1510,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1580,8 +1580,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1590,8 +1590,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 				       VMXNET3_CMD_UPDATE_FEATURE);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 59bee9723cfc..7588ba929b65 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -32,18 +32,18 @@
 				VMXNET3_MAX_RX_QUEUES + 1)
 
 #define VMXNET3_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 #define VMXNET3_V4_RSS_MASK ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define VMXNET3_MANDATORY_V4_RSS ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 /* RSS configuration structure - shared with device through GPA */
 typedef struct VMXNET3_RSSConf {
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 5cf53d4de825..0f2671f528f4 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1326,13 +1326,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	rss_hf = port_rss_conf->rss_hf &
 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1389,13 +1389,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
 	/* loading hashType */
 	dev_rss_conf->hashType = 0;
 	rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
 	return VMXNET3_SUCCESS;
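
vmxnet3_v4_rss_configure() and vmxnet3_rss_configure() above translate the
RTE_ETH_RSS_* request bits into device hash-type fields. The matching
request on the application side is a few lines of rte_eth_conf; a minimal
sketch, assuming dev_info has already been filled in by
rte_eth_dev_info_get():

	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL, /* let the PMD choose a key */
				.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
					  RTE_ETH_RSS_NONFRAG_IPV6_TCP,
			},
		},
	};

	/* Drop any hash types the port cannot compute. */
	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;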
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 5251db0b1674..ecc6ef2965ee 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -71,12 +71,12 @@ mbuf_input(struct rte_mbuf *mbuf)
 
 static const struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -334,7 +334,7 @@ check_port_link_status(uint16_t port_id)
 
 		if (link_get_err >= 0 && link.link_status) {
 			const char *dp = (link.link_duplex ==
-				ETH_LINK_FULL_DUPLEX) ?
+				RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex";
 			printf("\nPort %u Link Up - speed %s - %s\n",
 				port_id,
diff --git a/examples/bond/main.c b/examples/bond/main.c
index f48400e21156..e4c627e203a4 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -116,18 +116,18 @@ static struct rte_mempool *mbuf_pool;
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -151,9 +151,9 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
@@ -243,9 +243,9 @@ bond_port_init(struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			BOND_PORT, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
 	if (retval != 0)
 		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
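
The MBUF_FAST_FREE fragment above recurs in almost every example that
follows, and it stays conditional for a reason: the offload lets the PMD
free transmitted mbufs in bulk, but it is only valid when all mbufs of a
TX queue come from a single mempool and carry a reference count of one.
The recurring pattern, reduced to a sketch:

	/* Opt in to bulk TX mbuf release only when the PMD supports it;
	 * the application must then keep each TX queue on one mempool
	 * with refcnt == 1 mbufs. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;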
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 1b1029660e77..e6af8420e4c6 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -80,16 +80,16 @@ struct app_stats prev_app_stats;
 
 static const struct rte_eth_conf port_conf_default = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		}
 	},
 };
@@ -127,9 +127,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 21ed85c7d6c9..5053d174335c 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -98,7 +98,7 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
 	int ret;
 
 	memset(&cfg_port, 0, sizeof(cfg_port));
-	cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
+	cfg_port.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
 
 	for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
 		struct app_port *ptr_port = &app_cfg->ports[idx_port];
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 413251630709..e7cdf8d5775b 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -233,13 +233,13 @@ rte_ethtool_get_pauseparam(uint16_t port_id,
 	pause_param->tx_pause = 0;
 	pause_param->rx_pause = 0;
 	switch (fc_conf.mode) {
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		pause_param->rx_pause = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		pause_param->tx_pause = 1;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_param->rx_pause = 1;
 		pause_param->tx_pause = 1;
 	default:
@@ -277,14 +277,14 @@ rte_ethtool_set_pauseparam(uint16_t port_id,
 
 	if (pause_param->tx_pause) {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_FULL;
+			fc_conf.mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf.mode = RTE_FC_TX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
 	} else {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_RX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_RX_PAUSE;
 		else
-			fc_conf.mode = RTE_FC_NONE;
+			fc_conf.mode = RTE_ETH_FC_NONE;
 	}
 
 	status = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
@@ -398,12 +398,12 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
 	for (vf = 0; vf < num_vfs; vf++) {
 #ifdef RTE_NET_IXGBE
 		rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
-			ETH_VMDQ_ACCEPT_UNTAG, 0);
+			RTE_ETH_VMDQ_ACCEPT_UNTAG, 0);
 #endif
 	}
 
 	/* Enable Rx vlan filter, VF unsupported status is discarded */
-	ret = rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
+	ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
 	if (ret != 0)
 		return ret;
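
The four RTE_ETH_FC_* values are simply the cross product of the rx and tx
pause bits, which is why the two switch statements above stay symmetric.
Condensed into a sketch (the helper name is hypothetical):

	static enum rte_eth_fc_mode
	pause_to_fc_mode(int rx_pause, int tx_pause)
	{
		if (rx_pause && tx_pause)
			return RTE_ETH_FC_FULL;
		if (tx_pause)
			return RTE_ETH_FC_TX_PAUSE;
		if (rx_pause)
			return RTE_ETH_FC_RX_PAUSE;
		return RTE_ETH_FC_NONE;
	}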
 
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index f70ab0cc9e38..3ac98add5692 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -283,14 +283,14 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -312,12 +312,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index ca6cd200caad..5780928d75ee 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -614,14 +614,14 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -643,9 +643,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
 
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index db71f5aa0401..f44ee65372ff 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -218,9 +218,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index 29fb4b3d55ef..150406e385d4 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -113,7 +113,7 @@ assert_link_status(void)
 	memset(&link, 0, sizeof(link));
 	do {
 		link_get_err = rte_eth_link_get(port_id, &link);
-		if (link_get_err == 0 && link.link_status == ETH_LINK_UP)
+		if (link_get_err == 0 && link.link_status == RTE_ETH_LINK_UP)
 			break;
 		rte_delay_ms(CHECK_INTERVAL);
 	} while (--rep_cnt);
@@ -121,7 +121,7 @@ assert_link_status(void)
 	if (link_get_err < 0)
 		rte_exit(EXIT_FAILURE, ":: error: link get is failing: %s\n",
 			 rte_strerror(-link_get_err));
-	if (link.link_status == ETH_LINK_DOWN)
+	if (link.link_status == RTE_ETH_LINK_DOWN)
 		rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
 }
 
@@ -138,12 +138,12 @@ init_port(void)
 		},
 		.txmode = {
 			.offloads =
-				DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO,
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO,
 		},
 	};
 	struct rte_eth_txconf txq_conf;
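
assert_link_status() above shows the usual wait loop: poll until the port
reports RTE_ETH_LINK_UP or the retry budget runs out. The same loop in its
minimal form (retry count and poll interval are illustrative values):

	struct rte_eth_link link;
	int rep = 90;

	memset(&link, 0, sizeof(link));
	do {
		if (rte_eth_link_get(port_id, &link) == 0 &&
		    link.link_status == RTE_ETH_LINK_UP)
			break;
		rte_delay_ms(100); /* illustrative poll interval */
	} while (--rep);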
diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index 0c413180f889..94e3ac91b299 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -819,13 +819,13 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	/* Configuring port to use RSS for multiple RX queues. 8< */
 	static const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_PROTO_MASK,
+				.rss_hf = RTE_ETH_RSS_PROTO_MASK,
 			}
 		}
 	};
@@ -853,9 +853,9 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device:"
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index f24536972084..aa41fcc1d037 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -148,14 +148,14 @@ static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_JUMBO_FRAME),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME),
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -624,7 +624,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
index 16bcffe356bc..f6ecd9b0fe3a 100644
--- a/examples/ip_pipeline/link.c
+++ b/examples/ip_pipeline/link.c
@@ -45,7 +45,7 @@ link_next(struct link *link)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -57,12 +57,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -77,11 +77,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -139,7 +139,7 @@ link_create(const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -157,9 +157,9 @@ link_create(const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -267,5 +267,5 @@ link_is_up(const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
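
rss_setup() above is the canonical redirection-table fill: the table is
grouped 64 entries at a time, so index i splits into a group
(i / RTE_ETH_RETA_GROUP_SIZE) and a slot (i % RTE_ETH_RETA_GROUP_SIZE).
The same loop as a standalone sketch, with reta_size, n_queues and
port_id assumed given:

	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
						  RTE_ETH_RETA_GROUP_SIZE];
	uint32_t i;
	int status;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint32_t gid = i / RTE_ETH_RETA_GROUP_SIZE;
		uint32_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[gid].mask = UINT64_MAX;
		reta_conf[gid].reta[pos] = i % n_queues; /* round-robin */
	}
	status = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);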
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 8645ac790be4..8aabea002bbb 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -161,22 +161,22 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_JUMBO_FRAME),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_JUMBO_FRAME),
 	},
 	.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -740,7 +740,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1097,9 +1097,9 @@ main(int argc, char **argv)
 		n_tx_queue = nb_lcores;
 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index f252d34985b4..73932564e459 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -234,20 +234,20 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1456,10 +1456,10 @@ print_usage(const char *prgname)
 		"               \"parallel\" : Parallel\n"
 		"  --" CMD_LINE_OPT_RX_OFFLOAD
 		": bitmask of the RX HW offload capabilities to enable/use\n"
-		"                         (DEV_RX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_TX_OFFLOAD
 		": bitmask of the TX HW offload capabilities to enable/use\n"
-		"                         (DEV_TX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
 		": max number of entries in reassemble(fragment) table\n"
 		"    (zero (default value) disables reassembly)\n"
@@ -1908,7 +1908,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2211,12 +2211,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 
 	frame_size = MTU_TO_FRAMELEN(mtu_size);
 	if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	local_port_conf.rxmode.max_rx_pkt_len = frame_size;
 
 	if (multi_seg_required()) {
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	local_port_conf.rxmode.offloads |= req_rx_offloads;
@@ -2239,12 +2239,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 			portid, local_port_conf.txmode.offloads,
 			dev_info.tx_offload_capa);
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	printf("port %u configurng rx_offloads=0x%" PRIx64
 		", tx_offloads=0x%" PRIx64 "\n",
@@ -2302,7 +2302,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 		/* Pre-populate pkt offloads based on capabilities */
 		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
 		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
-		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
 
 		tx_queueid++;
@@ -2663,7 +2663,7 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
 	struct rte_flow *flow;
 	int ret;
 
-	if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 17a28556c971..5cdd794f017f 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -986,7 +986,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	if (inbound) {
 		if ((dev_info.rx_offload_capa &
-				DEV_RX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware RX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -994,7 +994,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	} else { /* outbound */
 		if ((dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware TX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -1628,7 +1628,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 	}
 
 	/* Check for outbound rules that use offloads and use this port */
@@ -1639,7 +1639,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
 	}
 	return 0;
 }
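
check_eth_dev_caps() and sa_check_offloads() above work as a pair: one
verifies the SECURITY capability, the other turns matching inline rules
into requested offloads. Combined, the gate reduces to a sketch like this
(the have_*_inline_rules flags are hypothetical placeholders):

	uint64_t rx_offloads = 0, tx_offloads = 0;

	if (have_inbound_inline_rules) {
		if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY))
			return -EINVAL; /* no HW RX IPsec on this port */
		rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
	}
	if (have_outbound_inline_rules) {
		if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SECURITY))
			return -EINVAL; /* no HW TX IPsec on this port */
		tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
	}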
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index cc527d7f6b38..96fb325ff180 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -112,11 +112,11 @@ static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
+		.offloads = RTE_ETH_RX_OFFLOAD_JUMBO_FRAME,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	},
 };
 
@@ -620,7 +620,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/kni/main.c b/examples/kni/main.c
index beabb3c848aa..81124dc0dc88 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -95,7 +95,7 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
 /* Options for configuring ethernet port */
 static struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -608,9 +608,9 @@ init_port(uint16_t port)
 			"Error during getting device (port %u) info: %s\n",
 			port, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
@@ -688,7 +688,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -792,9 +792,9 @@ kni_change_mtu_(uint16_t port_id, unsigned int new_mtu)
 	memcpy(&conf, &port_conf, sizeof(conf));
 	/* Set new MTU */
 	if (new_mtu > RTE_ETHER_MAX_LEN)
-		conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* mtu + length of header + length of FCS = max pkt length */
 	conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 5f539c458cdd..89489843e2bd 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -216,12 +216,12 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1809,7 +1809,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2633,9 +2633,9 @@ initialize_ports(struct l2fwd_crypto_options *options)
 			return retval;
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (retval < 0) {
 			printf("Cannot configure device: err=%d, port=%u\n",
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index b8c1e02d7598..80a72f7095cf 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -15,7 +15,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 			.split_hdr_size = 0,
 		},
 		.txmode = {
-			.mq_mode = ETH_MQ_TX_NONE,
+			.mq_mode = RTE_ETH_MQ_TX_NONE,
 		},
 	};
 	uint16_t nb_ports_available = 0;
@@ -23,9 +23,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 	int ret;
 
 	if (rsrc->event_mode) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-		port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+		port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	}
 
 	/* Initialise each port */
@@ -61,9 +61,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queue. 8< */
 		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 1db89f2bd139..9806204b81d1 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -395,7 +395,7 @@ check_all_ports_link_status(struct l2fwd_resources *rsrc,
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index bbb4a27a6d54..2e50339afb61 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -94,7 +94,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -726,7 +726,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -869,9 +869,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 4e1a17cfe4f5..d228a842788d 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -478,7 +478,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -650,9 +650,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE,
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 911e40c66e0e..b4a69dde63dc 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -95,7 +95,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -606,7 +606,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -792,9 +792,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the number of queues for a port. */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index a1f457b564b6..9323426e9b1d 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -124,20 +124,20 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1815,9 +1815,9 @@ parse_args(int argc, char **argv)
 
 			printf("jumbo frame is enabled\n");
 			port_conf.rxmode.offloads |=
-					DEV_RX_OFFLOAD_JUMBO_FRAME;
+					RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			port_conf.txmode.offloads |=
-					DEV_TX_OFFLOAD_MULTI_SEGS;
+					RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/*
 			 * if no max-pkt-len set, then use the
@@ -1970,7 +1970,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2080,9 +2080,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index a0de8ca9b42d..278fe95970f3 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,18 +111,18 @@ static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -494,8 +494,8 @@ parse_args(int argc, char **argv)
 			const struct option lenopts = {"max-pkt-len",
 						       required_argument, 0, 0};
 
-			port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-			port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+			port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+			port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/*
 			 * if no max-pkt-len set, use the default
@@ -628,7 +628,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* Clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -807,9 +807,9 @@ main(int argc, char **argv)
 		       nb_rx_queue, n_tx_queue);
 
 		rte_eth_dev_info_get(portid, &dev_info);
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index aa7b8db44ae8..85609e9d4593 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -250,19 +250,19 @@ uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_UDP,
+			.rss_hf = RTE_ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	}
 };
 
@@ -1961,9 +1961,9 @@ parse_args(int argc, char **argv)
 
 				printf("jumbo frame is enabled \n");
 				port_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
+						RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 				port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MULTI_SEGS;
+						RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 				/**
 				 * if no max-pkt-len set, use the default value
@@ -2222,7 +2222,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2622,9 +2622,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 961860ea18ef..7c7613a83aad 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -75,9 +75,9 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
 			rte_panic("Error during getting device (port %u) info:"
 				  "%s\n", port_id, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+						RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 						dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 00ac267af1dd..500444565463 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -120,19 +120,19 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -703,8 +703,8 @@ parse_args(int argc, char **argv)
 				"max-pkt-len", required_argument, 0, 0
 			};
 
-			port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-			port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+			port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+			port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/*
 			 * if no max-pkt-len set, use the default
@@ -926,7 +926,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1035,15 +1035,15 @@ l3fwd_poll_resource_setup(void)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
 
 		if (dev_info.max_rx_queues == 1)
-			local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 
 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 7470aa539a90..6880b58476f4 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.intr_conf = {
 		.lsc = 1, /**< lsc interrupt feature enabled */
@@ -147,7 +147,7 @@ print_stats(void)
 			   link_get_err < 0 ? "0" :
 			   rte_eth_link_speed_to_str(link.link_speed),
 			   link_get_err < 0 ? "Link get failed" :
-			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+			   (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex"),
 			   port_statistics[portid].tx,
 			   port_statistics[portid].rx,
@@ -507,7 +507,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -634,9 +634,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 1ad71ca7ec5f..23307073c904 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -94,7 +94,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS
+			.mq_mode = RTE_ETH_MQ_RX_RSS
 		}
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_clients;
@@ -213,7 +213,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 01dc3acf34d5..85955375f1bf 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -176,18 +176,18 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 {
 	struct rte_eth_conf port_conf = {
 			.rxmode = {
-				.mq_mode	= ETH_MQ_RX_RSS,
+				.mq_mode	= RTE_ETH_MQ_RX_RSS,
 				.split_hdr_size = 0,
-				.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+				.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 			},
 			.rx_adv_conf = {
 				.rss_conf = {
 					.rss_key = NULL,
-					.rss_hf = ETH_RSS_IP,
+					.rss_hf = RTE_ETH_RSS_IP,
 				},
 			},
 			.txmode = {
-				.mq_mode = ETH_MQ_TX_NONE,
+				.mq_mode = RTE_ETH_MQ_TX_NONE,
 			}
 	};
 	const uint16_t rx_rings = num_queues, tx_rings = num_queues;
@@ -218,9 +218,9 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 
 	info.default_rxconf.rx_drop_en = 1;
 
-	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
@@ -392,7 +392,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
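
smp_port_init() above also shows the defensive half of RSS setup: the
requested rss_hf is intersected with the port's flow_type_rss_offloads,
and any adjustment is worth reporting rather than accepting silently. A
sketch of that check:

	uint64_t requested = port_conf.rx_adv_conf.rss_conf.rss_hf;

	port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != requested)
		printf("Port %u: rss_hf adjusted 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
		       port, requested,
		       port_conf.rx_adv_conf.rss_conf.rss_hf);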
diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
index e9a388710647..f110fc129f55 100644
--- a/examples/ntb/ntb_fwd.c
+++ b/examples/ntb/ntb_fwd.c
@@ -89,17 +89,17 @@ static uint16_t pkt_burst = NTB_DFLT_PKT_BURST;
 
 static struct rte_eth_conf eth_port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index d2fe9f6b50d8..eb15899c902f 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -294,9 +294,9 @@ configure_eth_port(uint16_t port_id)
 		return ret;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
 	if (ret != 0)
 		return ret;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 2f593abf263d..86671655b432 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -307,19 +307,19 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_TCP,
+			.rss_hf = RTE_ETH_RSS_TCP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -2988,9 +2988,9 @@ parse_args(int argc, char **argv)
 
 			printf("jumbo frame is enabled - disabling simple TX path\n");
 			port_conf.rxmode.offloads |=
-					DEV_RX_OFFLOAD_JUMBO_FRAME;
+					RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 			port_conf.txmode.offloads |=
-					DEV_TX_OFFLOAD_MULTI_SEGS;
+					RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 			/* if no max-pkt-len set, use the default value
 			 * RTE_ETHER_MAX_LEN
@@ -3466,7 +3466,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3577,9 +3577,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 467cda5a6dac..7ea670f109b8 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -133,7 +133,7 @@ mempool_find(struct obj *obj, const char *name)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -145,12 +145,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -165,11 +165,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -227,7 +227,7 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -245,9 +245,9 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -356,7 +356,7 @@ link_is_up(struct obj *obj, const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
 
 struct link *
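The RETA indexing above is unchanged by the rename: entries are handled in
groups, with RTE_ETH_RETA_GROUP_SIZE giving the group width. A minimal sketch
of the same arithmetic, assuming a 128-entry table spread round-robin over 4
queues (the helper name is illustrative):

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    reta_round_robin(uint16_t port_id)
    {
    	struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_ETH_RETA_GROUP_SIZE];
    	uint32_t i;

    	memset(reta_conf, 0, sizeof(reta_conf));
    	for (i = 0; i < 128; i++) {
    		uint32_t id  = i / RTE_ETH_RETA_GROUP_SIZE; /* which group */
    		uint32_t pos = i % RTE_ETH_RETA_GROUP_SIZE; /* slot in group */

    		reta_conf[id].mask |= 1ULL << pos;
    		reta_conf[id].reta[pos] = i % 4;
    	}
    	return rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
    }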
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 4f32ade7fbf7..db32b0d6c427 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -197,14 +197,14 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Force full Tx path in the driver, required for IEEE1588 */
-	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
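The same capability check applies to any offload bit: test the capa mask
first, then set the bit in the configuration. A minimal sketch with the
renamed flags (the helper name is an illustration only):

    #include <rte_ethdev.h>

    /* Enable offloads only when the port advertises them. */
    static void
    maybe_enable_offloads(struct rte_eth_conf *conf,
    		const struct rte_eth_dev_info *info)
    {
    	if (info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
    		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
    	if (info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
    		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
    }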
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index 7ffccc8369dc..5ef14c176b11 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -51,19 +51,19 @@ static struct rte_mempool *pool = NULL;
  ***/
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -333,8 +333,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_rx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
@@ -379,8 +379,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_tx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 1abe003fc6ae..e750928fb89d 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -61,7 +61,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -106,9 +106,9 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE,
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 6f20f98b2b30..08df716dc0fb 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -145,17 +145,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (hw_timestamping) {
-		if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
+		if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 			printf("\nERROR: Port %u does not support hardware timestamping\n"
 					, port);
 			return -1;
 		}
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
 		if (hwts_dynfield_offset < 0) {
 			printf("ERROR: Failed to register timestamp field\n");
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index 9ebd88bac20e..074fee5b26b2 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -96,7 +96,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_nodes;
@@ -115,9 +115,9 @@ init_port(uint16_t port_num)
 	if (retval != 0)
 		return retval;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/*
 	 * Standard DPDK port initialisation - config port, then set up
@@ -277,7 +277,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index ae08261befd7..737df4ca2a17 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -55,9 +55,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index bc3d71c8984e..b1d363ae21db 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -109,23 +109,23 @@ static int nb_sockets;
 /* empty vmdq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 		/*
 		 * VLAN strip is necessary for 1G NICs such as I350;
 		 * it fixes a bug where IPv4 forwarding in the guest cannot
 		 * forward packets from one virtio dev to another virtio dev.
 		 */
-		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
+		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM |
-			     DEV_TX_OFFLOAD_VLAN_INSERT |
-			     DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_TCP_TSO),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO),
 	},
 	.rx_adv_conf = {
 		/*
@@ -133,7 +133,7 @@ static struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -290,9 +290,9 @@ port_init(uint16_t port)
 		return -1;
 
 	rx_rings = (uint16_t)dev_info.max_rx_queues;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0) {
@@ -562,8 +562,8 @@ us_vhost_parse_args(int argc, char **argv)
 		case 'P':
 			promiscuous = 1;
 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
-				ETH_VMDQ_ACCEPT_BROADCAST |
-				ETH_VMDQ_ACCEPT_MULTICAST;
+				RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+				RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 			break;
 
 		case OPT_VM2VM_NUM:
@@ -638,7 +638,7 @@ us_vhost_parse_args(int argc, char **argv)
 			mergeable = !!ret;
 			if (ret) {
 				vmdq_conf_default.rxmode.offloads |=
-					DEV_RX_OFFLOAD_JUMBO_FRAME;
+					RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
 				vmdq_conf_default.rxmode.max_rx_pkt_len
 					= JUMBO_FRAME_MAX_SIZE;
 			}
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index 7d5bf6855426..dddcde40efe2 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -78,9 +78,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -278,7 +278,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 		       /* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index d3bc19f78ee5..16782a5d850f 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -66,12 +66,12 @@ static uint8_t rss_enable;
 /* empty vmdq configuration structure. Filled in programmatically */
 static const struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		/*
@@ -79,7 +79,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -157,11 +157,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -259,9 +259,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
 	if (retval != 0)
 		return retval;
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 685a03bdd194..3677a34da849 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -60,8 +60,8 @@ static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports;
 
 /* number of pools (if user does not specify any, 32 by default) */
-static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
-static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
+static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
+static enum rte_eth_nb_tcs   num_tcs   = RTE_ETH_4_TCS;
 static uint16_t num_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
 static uint8_t rss_enable;
@@ -69,11 +69,11 @@ static uint8_t rss_enable;
 /* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
 static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_DCB,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
+		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
 	},
 	/*
 	 * should be overridden separately in code with
@@ -81,7 +81,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	 */
 	.rx_adv_conf = {
 		.vmdq_dcb_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -89,12 +89,12 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 			.dcb_tc = {0},
 		},
 		.dcb_rx_conf = {
-				.nb_tcs = ETH_4_TCS,
+				.nb_tcs = RTE_ETH_4_TCS,
 				/** Traffic class each UP mapped to. */
 				.dcb_tc = {0},
 		},
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -103,7 +103,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	},
 	.tx_adv_conf = {
 		.vmdq_dcb_tx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.dcb_tc = {0},
 		},
 	},
@@ -157,7 +157,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 		conf.pool_map[i].pools = 1UL << i;
 		vmdq_conf.pool_map[i].pools = 1UL << i;
 	}
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		conf.dcb_tc[i] = i % num_tcs;
 		dcb_conf.dcb_tc[i] = i % num_tcs;
 		tx_conf.dcb_tc[i] = i % num_tcs;
@@ -173,11 +173,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
 			  sizeof(tx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
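For clarity: with num_tcs == RTE_ETH_4_TCS the modulo mapping above assigns
the eight user priorities to traffic classes {0, 1, 2, 3, 0, 1, 2, 3}; with
RTE_ETH_8_TCS it is the identity mapping.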
@@ -271,9 +271,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
@@ -390,9 +390,9 @@ vmdq_parse_num_pools(const char *q_arg)
 	if (n != 16 && n != 32)
 		return -1;
 	if (n == 16)
-		num_pools = ETH_16_POOLS;
+		num_pools = RTE_ETH_16_POOLS;
 	else
-		num_pools = ETH_32_POOLS;
+		num_pools = RTE_ETH_32_POOLS;
 
 	return 0;
 }
@@ -412,9 +412,9 @@ vmdq_parse_num_tcs(const char *q_arg)
 	if (n != 4 && n != 8)
 		return -1;
 	if (n == 4)
-		num_tcs = ETH_4_TCS;
+		num_tcs = RTE_ETH_4_TCS;
 	else
-		num_tcs = ETH_8_TCS;
+		num_tcs = RTE_ETH_8_TCS;
 
 	return 0;
 }
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9d95cd11e1b5..9ccbd7db4063 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -98,9 +98,6 @@ static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
 
 static const struct {
@@ -126,14 +123,14 @@ static const struct {
 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
-	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
 };
 
 #undef RTE_RX_OFFLOAD_BIT2STR
 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
 
 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_TX_OFFLOAD_##_name, #_name }
+	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
 
 static const struct {
 	uint64_t offload;
@@ -1184,32 +1181,32 @@ uint32_t
 rte_eth_speed_bitflag(uint32_t speed, int duplex)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
-		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
-	case ETH_SPEED_NUM_100M:
-		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
-	case ETH_SPEED_NUM_1G:
-		return ETH_LINK_SPEED_1G;
-	case ETH_SPEED_NUM_2_5G:
-		return ETH_LINK_SPEED_2_5G;
-	case ETH_SPEED_NUM_5G:
-		return ETH_LINK_SPEED_5G;
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10M:
+		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+	case RTE_ETH_SPEED_NUM_100M:
+		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+	case RTE_ETH_SPEED_NUM_1G:
+		return RTE_ETH_LINK_SPEED_1G;
+	case RTE_ETH_SPEED_NUM_2_5G:
+		return RTE_ETH_LINK_SPEED_2_5G;
+	case RTE_ETH_SPEED_NUM_5G:
+		return RTE_ETH_LINK_SPEED_5G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
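A quick illustration of the renamed speed constants as this function maps
them (the values follow directly from the switch above):

    uint32_t flag;

    /* Full duplex at 10 Gbps maps to the 10G capability bit. */
    flag = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G, 1);
    /* flag == RTE_ETH_LINK_SPEED_10G */

    /* Half duplex is only distinct at 10/100 Mbps. */
    flag = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_100M, 0);
    /* flag == RTE_ETH_LINK_SPEED_100M_HD */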
@@ -1458,7 +1455,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If jumbo frames are enabled, check that the maximum RX packet
 	 * length is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
 			RTE_ETHDEV_LOG(ERR,
 				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
@@ -1491,7 +1488,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (dev_conf->rxmode.max_lro_pkt_size == 0)
 			dev->data->dev_conf.rxmode.max_lro_pkt_size =
 				dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1543,12 +1540,12 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
-	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
-	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
 			port_id,
-			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -2157,7 +2154,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
 			dev->data->dev_conf.rxmode.max_lro_pkt_size =
 				dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -2752,21 +2749,21 @@ const char *
 rte_eth_link_speed_to_str(uint32_t link_speed)
 {
 	switch (link_speed) {
-	case ETH_SPEED_NUM_NONE: return "None";
-	case ETH_SPEED_NUM_10M:  return "10 Mbps";
-	case ETH_SPEED_NUM_100M: return "100 Mbps";
-	case ETH_SPEED_NUM_1G:   return "1 Gbps";
-	case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
-	case ETH_SPEED_NUM_5G:   return "5 Gbps";
-	case ETH_SPEED_NUM_10G:  return "10 Gbps";
-	case ETH_SPEED_NUM_20G:  return "20 Gbps";
-	case ETH_SPEED_NUM_25G:  return "25 Gbps";
-	case ETH_SPEED_NUM_40G:  return "40 Gbps";
-	case ETH_SPEED_NUM_50G:  return "50 Gbps";
-	case ETH_SPEED_NUM_56G:  return "56 Gbps";
-	case ETH_SPEED_NUM_100G: return "100 Gbps";
-	case ETH_SPEED_NUM_200G: return "200 Gbps";
-	case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+	case RTE_ETH_SPEED_NUM_NONE: return "None";
+	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
+	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
+	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
+	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
+	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
+	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
+	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
+	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
+	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
+	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
 	default: return "Invalid";
 	}
 }
@@ -2790,14 +2787,14 @@ rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
 		return -EINVAL;
 	}
 
-	if (eth_link->link_status == ETH_LINK_DOWN)
+	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
 		return snprintf(str, len, "Link down");
 	else
 		return snprintf(str, len, "Link up at %s %s %s",
 			rte_eth_link_speed_to_str(eth_link->link_speed),
-			(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			"FDX" : "HDX",
-			(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 			"Autoneg" : "Fixed");
 }
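With the renamed constants, typical use of these helpers is unchanged; a
minimal sketch (port_id and the printf are assumptions for illustration):

    #include <stdio.h>
    #include <rte_ethdev.h>

    char text[RTE_ETH_LINK_MAX_STR_LEN];
    struct rte_eth_link link;

    if (rte_eth_link_get_nowait(port_id, &link) == 0) {
    	rte_eth_link_to_str(text, sizeof(text), &link);
    	/* e.g. "Link up at 10 Gbps FDX Autoneg" or "Link down" */
    	printf("Port %u: %s\n", port_id, text);
    }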
 
@@ -3663,7 +3660,7 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
 	dev = &rte_eth_devices[port_id];
 
 	if (!(dev->data->dev_conf.rxmode.offloads &
-	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
 			port_id);
 		return -ENOSYS;
@@ -3750,44 +3747,44 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	dev_offloads = orig_offloads;
 
 	/* check which option changed by application */
-	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
-		mask |= ETH_VLAN_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		mask |= RTE_ETH_VLAN_STRIP_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
-		mask |= ETH_VLAN_FILTER_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+		mask |= RTE_ETH_VLAN_FILTER_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
-		mask |= ETH_VLAN_EXTEND_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+		mask |= RTE_ETH_VLAN_EXTEND_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
-		mask |= ETH_QINQ_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+		mask |= RTE_ETH_QINQ_STRIP_MASK;
 	}
 
 	/*no change*/
@@ -3832,17 +3829,17 @@ rte_eth_dev_get_vlan_offload(uint16_t port_id)
 	dev = &rte_eth_devices[port_id];
 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		ret |= ETH_VLAN_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		ret |= ETH_VLAN_FILTER_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-		ret |= ETH_VLAN_EXTEND_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
-		ret |= ETH_QINQ_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
 
 	return ret;
 }
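The get/set pair works on the same renamed masks; a minimal sketch of a
read-modify-write (the helper name is illustrative):

    #include <rte_ethdev.h>

    /* Turn VLAN stripping on and filtering off, leaving other bits alone. */
    static int
    set_vlan_strip_only(uint16_t port_id)
    {
    	int mask = rte_eth_dev_get_vlan_offload(port_id);

    	if (mask < 0)
    		return mask;
    	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
    	mask &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
    	return rte_eth_dev_set_vlan_offload(port_id, mask);
    }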
@@ -3919,7 +3916,7 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
 		return -EINVAL;
 	}
@@ -3937,7 +3934,7 @@ eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
 {
 	uint16_t i, num;
 
-	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
+	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
 	for (i = 0; i < num; i++) {
 		if (reta_conf[i].mask)
 			return 0;
@@ -3959,8 +3956,8 @@ eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) &&
 			(reta_conf[idx].reta[shift] >= max_rxq)) {
 			RTE_ETHDEV_LOG(ERR,
@@ -4116,7 +4113,7 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4142,7 +4139,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4283,8 +4280,8 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 			port_id);
 		return -EINVAL;
 	}
-	if (pool >= ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
+	if (pool >= RTE_ETH_64_POOLS) {
+		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
@@ -4548,21 +4545,21 @@ rte_eth_mirror_rule_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
+	if (mirror_conf->dst_pool >= RTE_ETH_64_POOLS) {
 		RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
-			ETH_64_POOLS - 1);
+			RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
-	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
-	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
+	if ((mirror_conf->rule_type & (RTE_ETH_MIRROR_VIRTUAL_POOL_UP |
+	     RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
 	    (mirror_conf->pool_mask == 0)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Invalid mirror pool, pool mask can not be 0\n");
 		return -EINVAL;
 	}
 
-	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
+	if ((mirror_conf->rule_type & RTE_ETH_MIRROR_VLAN) &&
 	    mirror_conf->vlan.vlan_mask == 0) {
 		RTE_ETHDEV_LOG(ERR,
 			"Invalid vlan mask, vlan mask can not be 0\n");
@@ -6238,7 +6235,7 @@ eth_dev_handle_port_link_status(const char *cmd __rte_unused,
 	rte_tel_data_add_dict_string(d, status_str, "UP");
 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
 	rte_tel_data_add_dict_string(d, "duplex",
-			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex");
 	return 0;
 }
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index d2b27c351fdb..cabfe452c808 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -249,7 +249,7 @@ void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
  * field is not supported, its value is 0.
  * All byte-related statistics do not include Ethernet FCS regardless
  * of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
  */
 struct rte_eth_stats {
 	uint64_t ipackets;  /**< Total number of successfully received packets. */
@@ -279,61 +279,99 @@ struct rte_eth_stats {
 /**
  * Device supported speeds bitmap flags
  */
-#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
-#define ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
-#define ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
-#define ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
-#define ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
-#define ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
-#define ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
-#define ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
-#define ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG	RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED	RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD	RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M	RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD	RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M	RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
+#define ETH_LINK_SPEED_1G	RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G	RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
+#define ETH_LINK_SPEED_5G	RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
+#define ETH_LINK_SPEED_10G	RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
+#define ETH_LINK_SPEED_20G	RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
+#define ETH_LINK_SPEED_25G	RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
+#define ETH_LINK_SPEED_40G	RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
+#define ETH_LINK_SPEED_50G	RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
+#define ETH_LINK_SPEED_56G	RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G	RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G	RTE_ETH_LINK_SPEED_200G
 
 /**
  * Ethernet numeric link speeds in Mbps
  */
-#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
-#define ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
-#define ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
-#define ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
-#define ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
-#define ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
-#define ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
-#define ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
-#define ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
-#define ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
-#define ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE	RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
+#define ETH_SPEED_NUM_10M	RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M	RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
+#define ETH_SPEED_NUM_1G	RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G	RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
+#define ETH_SPEED_NUM_5G	RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
+#define ETH_SPEED_NUM_10G	RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
+#define ETH_SPEED_NUM_20G	RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
+#define ETH_SPEED_NUM_25G	RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
+#define ETH_SPEED_NUM_40G	RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
+#define ETH_SPEED_NUM_50G	RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
+#define ETH_SPEED_NUM_56G	RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G	RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G	RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN	RTE_ETH_SPEED_NUM_UNKNOWN
 
 /**
  * A structure used to retrieve link-level information of an Ethernet port.
  */
 __extension__
 struct rte_eth_link {
-	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
-	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
-	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;        /**< RTE_ETH_SPEED_NUM_ */
+	uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
 
 /* Utility constants */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX	RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX	RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN		RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP		RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED		RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG	RTE_ETH_LINK_AUTONEG
 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
 
 /**
@@ -349,9 +387,12 @@ struct rte_eth_thresh {
 /**
  *  Simple flags are used for rte_eth_conf.rxmode.mq_mode.
  */
-#define ETH_MQ_RX_RSS_FLAG  0x1
-#define ETH_MQ_RX_DCB_FLAG  0x2
-#define ETH_MQ_RX_VMDQ_FLAG 0x4
+#define RTE_ETH_MQ_RX_RSS_FLAG  0x1
+#define ETH_MQ_RX_RSS_FLAG	RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG  0x2
+#define ETH_MQ_RX_DCB_FLAG	RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4
+#define ETH_MQ_RX_VMDQ_FLAG	RTE_ETH_MQ_RX_VMDQ_FLAG
 
 /**
  *  A set of values to identify what method is to be used to route
@@ -359,50 +400,49 @@ struct rte_eth_thresh {
  */
 enum rte_eth_rx_mq_mode {
 	/** None of DCB,RSS or VMDQ mode */
-	ETH_MQ_RX_NONE = 0,
+	RTE_ETH_MQ_RX_NONE = 0,
 
 	/** For RX side, only RSS is on */
-	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
 	/** For RX side,only DCB is on. */
-	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Both DCB and RSS enable */
-	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 
 	/** Only VMDQ, no RSS nor DCB */
-	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** RSS mode with VMDQ */
-	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** Use VMDQ+DCB to route traffic to queues */
-	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Enable RSS, DCB and VMDq together */
-	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
-				 ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+				 RTE_ETH_MQ_RX_VMDQ_FLAG,
 };
 
-/**
- * for rx mq mode backward compatible
- */
-#define ETH_RSS                       ETH_MQ_RX_RSS
-#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX                    ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE		RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS		RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB		RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS	RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY	RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS	RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB	RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS	RTE_ETH_MQ_RX_VMDQ_DCB_RSS
 
 /**
  * A set of values to identify what method is to be used to transmit
  * packets using multi-TCs.
  */
 enum rte_eth_tx_mq_mode {
-	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
-	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
-	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
+	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
+	RTE_ETH_MQ_TX_DCB,          /**< For TX side, only DCB is on. */
+	RTE_ETH_MQ_TX_VMDQ_DCB,	/**< For TX side, both DCB and VT are on. */
+	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
-
-/**
- * for tx mq mode backward compatible
- */
-#define ETH_DCB_NONE                ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX                  ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE		RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB		RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB	RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY	RTE_ETH_MQ_TX_VMDQ_ONLY
 
 /**
  * A structure used to configure the RX features of an Ethernet port.
@@ -415,7 +455,7 @@ struct rte_eth_rxmode {
 	uint32_t max_lro_pkt_size;
 	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
 	/**
-	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -430,12 +470,17 @@ struct rte_eth_rxmode {
  * Note that single VLAN is treated the same as inner VLAN.
  */
 enum rte_vlan_type {
-	ETH_VLAN_TYPE_UNKNOWN = 0,
-	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
-	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
-	ETH_VLAN_TYPE_MAX,
+	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+	RTE_ETH_VLAN_TYPE_MAX,
 };
 
+#define ETH_VLAN_TYPE_UNKNOWN	RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER	RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER	RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX	RTE_ETH_VLAN_TYPE_MAX
+
 /**
  * A structure used to describe a vlan filter.
  * If the bit corresponding to a VID is set, such VID is on.
@@ -506,59 +551,96 @@ struct rte_eth_rss_conf {
  * Below macros are defined for RSS offload types, they can be used to
  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
  */
-#define ETH_RSS_IPV4               (1ULL << 2)
-#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
-#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
-#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
-#define ETH_RSS_IPV6               (1ULL << 8)
-#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
-#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
-#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
-#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
-#define ETH_RSS_IPV6_EX            (1ULL << 15)
-#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
-#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
-#define ETH_RSS_PORT               (1ULL << 18)
-#define ETH_RSS_VXLAN              (1ULL << 19)
-#define ETH_RSS_GENEVE             (1ULL << 20)
-#define ETH_RSS_NVGRE              (1ULL << 21)
-#define ETH_RSS_GTPU               (1ULL << 23)
-#define ETH_RSS_ETH                (1ULL << 24)
-#define ETH_RSS_S_VLAN             (1ULL << 25)
-#define ETH_RSS_C_VLAN             (1ULL << 26)
-#define ETH_RSS_ESP                (1ULL << 27)
-#define ETH_RSS_AH                 (1ULL << 28)
-#define ETH_RSS_L2TPV3             (1ULL << 29)
-#define ETH_RSS_PFCP               (1ULL << 30)
-#define ETH_RSS_PPPOE		   (1ULL << 31)
-#define ETH_RSS_ECPRI		   (1ULL << 32)
-#define ETH_RSS_MPLS		   (1ULL << 33)
+#define RTE_ETH_RSS_IPV4               (1ULL << 2)
+#define ETH_RSS_IPV4		RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4          (1ULL << 3)
+#define ETH_RSS_FRAG_IPV4	RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
+#define ETH_RSS_NONFRAG_IPV4_TCP	RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
+#define ETH_RSS_NONFRAG_IPV4_UDP	RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP	RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER	RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6               (1ULL << 8)
+#define ETH_RSS_IPV6		RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6          (1ULL << 9)
+#define ETH_RSS_FRAG_IPV6	RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
+#define ETH_RSS_NONFRAG_IPV6_TCP	RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
+#define ETH_RSS_NONFRAG_IPV6_UDP	RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP	RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER	RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD         (1ULL << 14)
+#define ETH_RSS_L2_PAYLOAD	RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX            (1ULL << 15)
+#define ETH_RSS_IPV6_EX		RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
+#define ETH_RSS_IPV6_TCP_EX	RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
+#define ETH_RSS_IPV6_UDP_EX	RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT               (1ULL << 18)
+#define ETH_RSS_PORT		RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN              (1ULL << 19)
+#define ETH_RSS_VXLAN		RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE             (1ULL << 20)
+#define ETH_RSS_GENEVE		RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE              (1ULL << 21)
+#define ETH_RSS_NVGRE		RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU               (1ULL << 23)
+#define ETH_RSS_GTPU		RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH                (1ULL << 24)
+#define ETH_RSS_ETH		RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN             (1ULL << 25)
+#define ETH_RSS_S_VLAN		RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN             (1ULL << 26)
+#define ETH_RSS_C_VLAN		RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP                (1ULL << 27)
+#define ETH_RSS_ESP		RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH                 (1ULL << 28)
+#define ETH_RSS_AH		RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3             (1ULL << 29)
+#define ETH_RSS_L2TPV3		RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP               (1ULL << 30)
+#define ETH_RSS_PFCP		RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE              (1ULL << 31)
+#define ETH_RSS_PPPOE		RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI              (1ULL << 32)
+#define ETH_RSS_ECPRI		RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS               (1ULL << 33)
+#define ETH_RSS_MPLS		RTE_ETH_RSS_MPLS
 
 /*
- * We use the following macros to combine with above ETH_RSS_* for
+ * We use the following macros to combine with above RTE_ETH_RSS_* for
  * more specific input set selection. These bits are defined starting
  * from the high end of the 64 bits.
- * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
+ * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it means
  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
  * the same level are used simultaneously, it is the same case as none of
  * them are added.
  */
-#define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
-#define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
-#define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
-#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
-#define ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
-#define ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define RTE_ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
+#define ETH_RSS_L3_SRC_ONLY	RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY        (1ULL << 62)
+#define ETH_RSS_L3_DST_ONLY	RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
+#define ETH_RSS_L4_SRC_ONLY	RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY        (1ULL << 60)
+#define ETH_RSS_L4_DST_ONLY	RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
+#define ETH_RSS_L2_SRC_ONLY	RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define ETH_RSS_L2_DST_ONLY	RTE_ETH_RSS_L2_DST_ONLY
 
 /*
  * Only select IPV6 address prefix as RSS input set according to
  * https://tools.ietf.org/html/rfc6052
- * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
- * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
+ * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
  */
 #define RTE_ETH_RSS_L3_PRE32	   (1ULL << 57)
 #define RTE_ETH_RSS_L3_PRE40	   (1ULL << 56)
@@ -580,22 +662,27 @@ struct rte_eth_rss_conf {
  * It basically stands for the innermost encapsulation level RSS
  * can be performed on according to PMD and device capabilities.
  */
-#define ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT	RTE_ETH_RSS_LEVEL_PMD_DEFAULT
 
 /**
  * level 1, requests RSS to be performed on the outermost packet
  * encapsulation level.
  */
-#define ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST	RTE_ETH_RSS_LEVEL_OUTERMOST
 
 /**
  * level 2, requests RSS to be performed on the specified inner packet
  * encapsulation level, from outermost to innermost (lower to higher values).
  */
-#define ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST	RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK	RTE_ETH_RSS_LEVEL_MASK
 
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf)	RTE_ETH_RSS_LEVEL(rss_hf)
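Since the level is a two-bit field at bit 50, a short worked example (the
values follow from the definitions above):

    uint64_t rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_LEVEL_INNERMOST;

    /* (rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50 == 2: hash the innermost level. */
    uint64_t level = RTE_ETH_RSS_LEVEL(rss_hf);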
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
@@ -610,222 +697,286 @@ struct rte_eth_rss_conf {
 static inline uint64_t
 rte_eth_rss_hf_refine(uint64_t rss_hf)
 {
-	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 
-	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 	return rss_hf;
 }
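
For example (illustrative only), requesting both SRC_ONLY and DST_ONLY
at L3 is contradictory, so the helper clears both bits and the plain
RTE_ETH_RSS_IPV4 semantics (both addresses hashed) remain:

	uint64_t rss_hf = RTE_ETH_RSS_IPV4 |
			  RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY;
	rss_hf = rte_eth_rss_hf_refine(rss_hf); /* == RTE_ETH_RSS_IPV4 */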
 
-#define ETH_RSS_IPV6_PRE32 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32	RTE_ETH_RSS_IPV6_PRE32
 
-#define ETH_RSS_IPV6_PRE40 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40	RTE_ETH_RSS_IPV6_PRE40
 
-#define ETH_RSS_IPV6_PRE48 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48	RTE_ETH_RSS_IPV6_PRE48
 
-#define ETH_RSS_IPV6_PRE56 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56	RTE_ETH_RSS_IPV6_PRE56
 
-#define ETH_RSS_IPV6_PRE64 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64	RTE_ETH_RSS_IPV6_PRE64
 
-#define ETH_RSS_IPV6_PRE96 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96	RTE_ETH_RSS_IPV6_PRE96
 
-#define ETH_RSS_IPV6_PRE32_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP	RTE_ETH_RSS_IPV6_PRE32_UDP
 
-#define ETH_RSS_IPV6_PRE40_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP	RTE_ETH_RSS_IPV6_PRE40_UDP
 
-#define ETH_RSS_IPV6_PRE48_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP	RTE_ETH_RSS_IPV6_PRE48_UDP
 
-#define ETH_RSS_IPV6_PRE56_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP	RTE_ETH_RSS_IPV6_PRE56_UDP
 
-#define ETH_RSS_IPV6_PRE64_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP	RTE_ETH_RSS_IPV6_PRE64_UDP
 
-#define ETH_RSS_IPV6_PRE96_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP	RTE_ETH_RSS_IPV6_PRE96_UDP
 
-#define ETH_RSS_IPV6_PRE32_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP	RTE_ETH_RSS_IPV6_PRE32_TCP
 
-#define ETH_RSS_IPV6_PRE40_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP	RTE_ETH_RSS_IPV6_PRE40_TCP
 
-#define ETH_RSS_IPV6_PRE48_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP	RTE_ETH_RSS_IPV6_PRE48_TCP
 
-#define ETH_RSS_IPV6_PRE56_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP	RTE_ETH_RSS_IPV6_PRE56_TCP
 
-#define ETH_RSS_IPV6_PRE64_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP	RTE_ETH_RSS_IPV6_PRE64_TCP
 
-#define ETH_RSS_IPV6_PRE96_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP	RTE_ETH_RSS_IPV6_PRE96_TCP
 
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP	RTE_ETH_RSS_IPV6_PRE32_SCTP
 
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP	RTE_ETH_RSS_IPV6_PRE40_SCTP
 
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP	RTE_ETH_RSS_IPV6_PRE48_SCTP
 
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP	RTE_ETH_RSS_IPV6_PRE56_SCTP
 
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP	RTE_ETH_RSS_IPV6_PRE64_SCTP
 
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
-	ETH_RSS_VXLAN  | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
-	ETH_RSS_S_VLAN  | \
-	ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP	RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP	RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP	RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP	RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP	RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+	RTE_ETH_RSS_VXLAN  | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL	RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+	RTE_ETH_RSS_S_VLAN  | \
+	RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN	RTE_ETH_RSS_VLAN
 
 /**< Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX | \
-	ETH_RSS_PORT  | \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE | \
-	ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX | \
+	RTE_ETH_RSS_PORT  | \
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE | \
+	RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK	RTE_ETH_RSS_PROTO_MASK
 
 /*
  * Definitions used for redirection table entry size.
  * Some RSS RETA sizes may not be supported by some drivers, check the
  * documentation or the description of relevant functions for more details.
  */
-#define ETH_RSS_RETA_SIZE_64  64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE   64
+#define RTE_ETH_RSS_RETA_SIZE_64  64
+#define ETH_RSS_RETA_SIZE_64	RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128	RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256	RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512	RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE   64
+#define RTE_RETA_GROUP_SIZE	RTE_ETH_RETA_GROUP_SIZE
 
 /* Definitions used for VMDQ and DCB functionality */
-#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
-#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS	RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES	RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES	RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES	RTE_ETH_DCB_NUM_QUEUES
 
 /* DCB capability defines */
-#define ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PG_SUPPORT	RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT	RTE_ETH_DCB_PFC_SUPPORT
 
 /* Definitions used for VLAN Offload functionality */
-#define ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD	RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD	RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD	RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD	RTE_ETH_QINQ_STRIP_OFFLOAD
 
 /* Definitions used for mask VLAN setting */
-#define ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
-#define ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
-#define ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
-#define ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
-#define ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
+#define ETH_VLAN_STRIP_MASK	RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter setting mask */
+#define ETH_VLAN_FILTER_MASK	RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend setting mask */
+#define ETH_VLAN_EXTEND_MASK	RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
+#define ETH_QINQ_STRIP_MASK	RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits */
+#define ETH_VLAN_ID_MAX		RTE_ETH_VLAN_ID_MAX
 
 /* Definitions used for receive MAC address   */
-#define ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define ETH_NUM_RECEIVE_MAC_ADDR	RTE_ETH_NUM_RECEIVE_MAC_ADDR
 
 /* Definitions used for unicast hash  */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY	RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
 
 /* Definitions used for VMDQ pool rx mode setting */
-#define ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG	RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_MC	RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC	RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST	RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST	RTE_ETH_VMDQ_ACCEPT_MULTICAST
 
 /** Maximum nb. of vlan per mirror rule */
-#define ETH_MIRROR_MAX_VLANS       64
+#define RTE_ETH_MIRROR_MAX_VLANS       64
+#define ETH_MIRROR_MAX_VLANS	RTE_ETH_MIRROR_MAX_VLANS
 
-#define ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
-#define ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
-#define ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
-#define ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
-#define ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_UP	RTE_ETH_MIRROR_VIRTUAL_POOL_UP
+#define RTE_ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
+#define ETH_MIRROR_UPLINK_PORT	RTE_ETH_MIRROR_UPLINK_PORT
+#define RTE_ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
+#define ETH_MIRROR_DOWNLINK_PORT	RTE_ETH_MIRROR_DOWNLINK_PORT
+#define RTE_ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
+#define ETH_MIRROR_VLAN		RTE_ETH_MIRROR_VLAN
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_DOWN	RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
 
 /**
  * A structure used to configure VLAN traffic mirror of an Ethernet port.
@@ -833,7 +984,7 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)
 struct rte_eth_vlan_mirror {
 	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
 	/** VLAN ID list for vlan mirroring. */
-	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
+	uint16_t vlan_id[RTE_ETH_MIRROR_MAX_VLANS];
 };
 
 /**
@@ -856,7 +1007,7 @@ struct rte_eth_mirror_conf {
 struct rte_eth_rss_reta_entry64 {
 	uint64_t mask;
 	/**< Mask bits indicate which entries need to be updated/queried. */
-	uint16_t reta[RTE_RETA_GROUP_SIZE];
+	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
 	/**< Group of 64 redirection table entries. */
 };
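
A minimal usage sketch with the renamed group size (`port_id` and
`nb_rx_queues` are assumptions): the RETA is written in groups of
RTE_ETH_RETA_GROUP_SIZE entries, with `mask` selecting which entries
inside each group are updated.

	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;
	int ret;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		/* Spread the 128 entries round-robin over the Rx queues. */
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
			i % nb_rx_queues;
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
			RTE_ETH_RSS_RETA_SIZE_128);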
 
@@ -865,38 +1016,44 @@ struct rte_eth_rss_reta_entry64 {
  * in DCB configurations
  */
 enum rte_eth_nb_tcs {
-	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
-	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
+	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
 };
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
 
 /**
  * This enum indicates the possible number of queue pools
  * in VMDQ configurations.
  */
 enum rte_eth_nb_pools {
-	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
-	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
-	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
-	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
+	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
+	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
+	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
+	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
 };
+#define ETH_8_POOLS	RTE_ETH_8_POOLS
+#define ETH_16_POOLS	RTE_ETH_16_POOLS
+#define ETH_32_POOLS	RTE_ETH_32_POOLS
+#define ETH_64_POOLS	RTE_ETH_64_POOLS
 
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_dcb_tx_conf {
 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_dcb_tx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_tx_conf {
@@ -922,8 +1079,8 @@ struct rte_eth_vmdq_dcb_conf {
 	struct {
 		uint16_t vlan_id; /**< The vlan id of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 	/**< Selects a queue in a pool */
 };
 
@@ -934,7 +1091,7 @@ struct rte_eth_vmdq_dcb_conf {
  * Using this feature, packets are routed to a pool of queues. By default,
  * the pool selection is based on the MAC address, the vlan id in the
  * vlan tag as specified in the pool_map array.
- * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
  * selection using only the MAC address. MAC address to pool mapping is done
  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
  * corresponding to the pool id.
@@ -955,7 +1112,7 @@ struct rte_eth_vmdq_rx_conf {
 	struct {
 		uint16_t vlan_id; /**< The vlan id of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
 };
 
 /**
@@ -964,7 +1121,7 @@ struct rte_eth_vmdq_rx_conf {
 struct rte_eth_txmode {
 	enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
 	/**
-	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -1048,7 +1205,7 @@ struct rte_eth_rxconf {
 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */
 	/**
-	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1077,7 +1234,7 @@ struct rte_eth_txconf {
 
 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	/**
-	 * Per-queue Tx offloads to be set  using DEV_TX_OFFLOAD_* flags.
+	 * Per-queue Tx offloads to be set  using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1188,12 +1345,17 @@ struct rte_eth_desc_lim {
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
-	RTE_FC_NONE = 0, /**< Disable flow control. */
-	RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
-	RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
-	RTE_FC_FULL      /**< Enable flow control on both side. */
+	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+	RTE_ETH_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
+	RTE_ETH_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
+	RTE_ETH_FC_FULL      /**< Enable flow control on both side. */
 };
 
+#define RTE_FC_NONE	RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE	RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE	RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL	RTE_ETH_FC_FULL
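
A hedged usage sketch for the renamed enum (assuming an already
configured port `port_id`): read the current settings back, then
request full flow control.

	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret == 0) {
		fc_conf.mode = RTE_ETH_FC_FULL;
		ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}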
+
 /**
  * A structure used to configure Ethernet flow control parameter.
  * These parameters will be configured into the register of the NIC.
@@ -1224,18 +1386,29 @@ struct rte_eth_pfc_conf {
  * @see rte_eth_udp_tunnel
  */
 enum rte_eth_tunnel_type {
-	RTE_TUNNEL_TYPE_NONE = 0,
-	RTE_TUNNEL_TYPE_VXLAN,
-	RTE_TUNNEL_TYPE_GENEVE,
-	RTE_TUNNEL_TYPE_TEREDO,
-	RTE_TUNNEL_TYPE_NVGRE,
-	RTE_TUNNEL_TYPE_IP_IN_GRE,
-	RTE_L2_TUNNEL_TYPE_E_TAG,
-	RTE_TUNNEL_TYPE_VXLAN_GPE,
-	RTE_TUNNEL_TYPE_ECPRI,
-	RTE_TUNNEL_TYPE_MAX,
+	RTE_ETH_TUNNEL_TYPE_NONE = 0,
+	RTE_ETH_TUNNEL_TYPE_VXLAN,
+	RTE_ETH_TUNNEL_TYPE_GENEVE,
+	RTE_ETH_TUNNEL_TYPE_TEREDO,
+	RTE_ETH_TUNNEL_TYPE_NVGRE,
+	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_ETH_TUNNEL_TYPE_ECPRI,
+	RTE_ETH_TUNNEL_TYPE_MAX,
 };
 
+#define RTE_TUNNEL_TYPE_NONE		RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN		RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE		RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO		RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE		RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG	RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI		RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX		RTE_ETH_TUNNEL_TYPE_MAX
+
 /* Deprecated API file for rte_eth_dev_filter_* functions */
 #include "rte_eth_ctrl.h"
 
@@ -1243,11 +1416,16 @@ enum rte_eth_tunnel_type {
  *  Memory space that can be configured to store Flow Director filters
  *  in the board memory.
  */
-enum rte_fdir_pballoc_type {
-	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
-	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
-	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+	RTE_ETH_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+	RTE_ETH_FDIR_PBALLOC_128K,     /**< 128k. */
+	RTE_ETH_FDIR_PBALLOC_256K,     /**< 256k. */
 };
+#define rte_fdir_pballoc_type	rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K	RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K	RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K	RTE_ETH_FDIR_PBALLOC_256K
 
 /**
  *  Select report mode of FDIR hash information in RX descriptors.
@@ -1264,9 +1442,9 @@ enum rte_fdir_status_mode {
  *
  * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
  */
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
 	enum rte_fdir_mode mode; /**< Flow Director mode. */
-	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+	enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
 	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
 	/** RX queue of packets matching a "drop" filter in perfect mode. */
 	uint8_t drop_queue;
@@ -1275,6 +1453,8 @@ struct rte_fdir_conf {
 	/**< Flex payload configuration. */
 };
 
+#define rte_fdir_conf rte_eth_fdir_conf
+
 /**
  * UDP tunneling configuration.
  *
@@ -1292,7 +1472,7 @@ struct rte_eth_udp_tunnel {
 /**
  * A structure used to enable/disable specific device interrupts.
  */
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint32_t lsc:1;
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
@@ -1301,18 +1481,20 @@ struct rte_intr_conf {
 	uint32_t rmv:1;
 };
 
+#define rte_intr_conf rte_eth_intr_conf
+
 /**
  * A structure used to configure an Ethernet port.
  * Depending upon the RX multi-queue mode, extra advanced
  * configuration settings may be needed.
  */
 struct rte_eth_conf {
-	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
-				used. ETH_LINK_SPEED_FIXED disables link
+	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
+				used. RTE_ETH_LINK_SPEED_FIXED disables link
 				autonegotiation, and a unique speed shall be
 				set. Otherwise, the bitmap defines the set of
 				speeds to be advertised. If the special value
-				ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
+				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
 				supported are advertised. */
 	struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
 	struct rte_eth_txmode txmode; /**< Port TX configuration. */
@@ -1338,49 +1520,72 @@ struct rte_eth_conf {
 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
 		/**< Port vmdq TX configuration. */
 	} tx_adv_conf; /**< Port TX DCB configuration (union). */
-	/** Currently,Priority Flow Control(PFC) are supported,if DCB with PFC
-	    is needed,and the variable must be set ETH_DCB_PFC_SUPPORT. */
+	/**
+	 * Currently, Priority Flow Control (PFC) is supported; if DCB with PFC
+	 * is needed, the variable must be set to RTE_ETH_DCB_PFC_SUPPORT.
+	 */
 	uint32_t dcb_capability_en;
-	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
-	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+	struct rte_eth_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
+	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
 };
 
 /**
  * RX offload capabilities of a device.
  */
-#define DEV_RX_OFFLOAD_VLAN_STRIP  0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT	0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER	0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND	0x00000400
-#define DEV_RX_OFFLOAD_JUMBO_FRAME	0x00000800
-#define DEV_RX_OFFLOAD_SCATTER		0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP  0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP	RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM	RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO     0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO		RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP  0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP	RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP	RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT	0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT	RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER	0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER	RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND	0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	0x00000800
+#define DEV_RX_OFFLOAD_JUMBO_FRAME	RTE_ETH_RX_OFFLOAD_JUMBO_FRAME
+#define RTE_ETH_RX_OFFLOAD_SCATTER	0x00002000
+#define DEV_RX_OFFLOAD_SCATTER		RTE_ETH_RX_OFFLOAD_SCATTER
 /**
  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_RX_OFFLOAD_TIMESTAMP	0x00004000
-#define DEV_RX_OFFLOAD_SECURITY         0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC		0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM	0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH		0x00080000
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP	0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP	RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY     0x00008000
+#define DEV_RX_OFFLOAD_SECURITY		RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC	0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC		RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH	0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH	RTE_ETH_RX_OFFLOAD_RSS_HASH
 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
 
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				 DEV_RX_OFFLOAD_UDP_CKSUM | \
-				 DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			     DEV_RX_OFFLOAD_VLAN_FILTER | \
-			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-			     DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM	RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN	RTE_ETH_RX_OFFLOAD_VLAN
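
Illustrative only: with the new names, an application would check the
capability reported by the driver before enabling the composite
checksum offload (`port_id` is assumed valid):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };

	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
	    (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM) ==
			RTE_ETH_RX_OFFLOAD_CHECKSUM)
		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;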
 
 /*
  * If new Rx offload capabilities are defined, they also must be
@@ -1390,52 +1595,74 @@ struct rte_eth_conf {
 /**
  * TX offload capabilities of a device.
  */
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM  0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO     0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO     0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
-#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT 0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT	RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM	RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO     0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO		RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO     0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO		RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT 0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT	RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT	RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
 /**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
  * tx queue without SW lock.
  */
-#define DEV_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS	RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 /**< Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 /**< Device supports optimization for fast release of mbufs.
  *   When set application must guarantee that per-queue all mbufs comes from
  *   the same mempool and has refcnt = 1.
  */
-#define DEV_TX_OFFLOAD_SECURITY         0x00020000
+#define RTE_ETH_TX_OFFLOAD_SECURITY         0x00020000
+#define DEV_TX_OFFLOAD_SECURITY	RTE_ETH_TX_OFFLOAD_SECURITY
 /**
  * Device supports generic UDP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO	RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
 /**
  * Device supports generic IP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
 /** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
 /**
  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP	RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
@@ -1567,7 +1794,7 @@ struct rte_eth_dev_info {
 	uint16_t vmdq_pool_base;  /**< First ID of VMDQ pools. */
 	struct rte_eth_desc_lim rx_desc_lim;  /**< RX descriptors limits */
 	struct rte_eth_desc_lim tx_desc_lim;  /**< TX descriptors limits */
-	uint32_t speed_capa;  /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 	/** Configured number of rx/tx queues */
 	uint16_t nb_rx_queues; /**< Number of RX queues. */
 	uint16_t nb_tx_queues; /**< Number of TX queues. */
@@ -1672,8 +1899,10 @@ struct rte_eth_xstat_name {
 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
 };
 
-#define ETH_DCB_NUM_TCS    8
-#define ETH_MAX_VMDQ_POOL  64
+#define RTE_ETH_DCB_NUM_TCS    8
+#define ETH_DCB_NUM_TCS	RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL  64
+#define ETH_MAX_VMDQ_POOL	RTE_ETH_MAX_VMDQ_POOL
 
 /**
  * A structure used to get the information of queue and
@@ -1684,12 +1913,12 @@ struct rte_eth_dcb_tc_queue_mapping {
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 	/** rx queues assigned to tc per Pool */
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 };
 
 /**
@@ -1698,8 +1927,8 @@ struct rte_eth_dcb_tc_queue_mapping {
  */
 struct rte_eth_dcb_info {
 	uint8_t nb_tcs;        /**< number of TCs */
-	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
-	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
+	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
+	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
 	/** rx queues assigned to tc */
 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
 };
@@ -1723,7 +1952,7 @@ enum rte_eth_fec_mode {
 
 /* A structure used to get capabilities per link speed */
 struct rte_eth_fec_capa {
-	uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
+	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
 	uint32_t capa;  /**< FEC capabilities bitmask */
 };
 
@@ -1749,13 +1978,17 @@ struct rte_eth_fec_capa {
  */
 
 /**< l2 tunnel enable mask */
-#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define RTE_ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define ETH_L2_TUNNEL_ENABLE_MASK	RTE_ETH_L2_TUNNEL_ENABLE_MASK
 /**< l2 tunnel insertion mask */
-#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define RTE_ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define ETH_L2_TUNNEL_INSERTION_MASK	RTE_ETH_L2_TUNNEL_INSERTION_MASK
 /**< l2 tunnel stripping mask */
-#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define RTE_ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define ETH_L2_TUNNEL_STRIPPING_MASK	RTE_ETH_L2_TUNNEL_STRIPPING_MASK
 /**< l2 tunnel forwarding mask */
-#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define RTE_ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define ETH_L2_TUNNEL_FORWARDING_MASK	RTE_ETH_L2_TUNNEL_FORWARDING_MASK
 
 /**
  * Function type used for RX packet processing packet callbacks.
@@ -2068,14 +2301,14 @@ uint16_t rte_eth_dev_count_total(void);
  * @param speed
  *   Numerical speed value in Mbps
  * @param duplex
- *   ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
+ *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
  * @return
  *   0 if the speed cannot be mapped
  */
 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 
 /**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2085,7 +2318,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
 
 /**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2179,7 +2412,7 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
- *   the DEV_RX_OFFLOAD_* flags.
+ *   the RTE_ETH_RX_OFFLOAD_* flags.
  *   If an offloading set in rx_conf->offloads
  *   hasn't been set in the input argument eth_conf->rxmode.offloads
  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
@@ -2756,7 +2989,7 @@ const char *rte_eth_link_speed_to_str(uint32_t link_speed);
  *
  * @param str
  *   A pointer to a string to be filled with textual representation of
- *   device status. At least ETH_LINK_MAX_STR_LEN bytes should be allocated to
+ *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
  *   store default link status text.
  * @param len
  *   Length of available memory at 'str' string.
@@ -3261,10 +3494,10 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
  *   The port identifier of the Ethernet device.
  * @param offload_mask
  *   The VLAN Offload bit mask can be mixed use with "OR"
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  * @return
  *   - (0) if successful.
  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
@@ -3280,10 +3513,10 @@ int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
  *   The port identifier of the Ethernet device.
  * @return
  *   - (>0) if successful. Bit mask to indicate
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  *   - (-ENODEV) if *port_id* invalid.
  */
 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
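
A small sketch of the intended use (assuming a configured port
`port_id`): fetch the current mask, toggle VLAN stripping, write the
mask back.

	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask >= 0) {
		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		rte_eth_dev_set_vlan_offload(port_id, mask);
	}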
@@ -5231,7 +5464,7 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
  * of those packets whose transmission was effectively completed.
  *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
  * invoke this function concurrently on the same tx queue without SW lock.
  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
  *
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index edf96de2dc2e..8e6156a62aa9 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -154,7 +154,7 @@ struct rte_eth_dev_data {
 			/**< Device Ethernet link address.
 			 *   @see rte_eth_dev_release_port()
 			 */
-	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+	uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
 			/**< Bitmap associating MAC addresses to pools. */
 	struct rte_ether_addr *hash_mac_addrs;
 			/**< Device Ethernet MAC addresses of hash filtering.
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index 70f455d47d60..4152067368b8 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -2593,7 +2593,7 @@ struct rte_flow_action_rss {
 	 * through.
 	 */
 	uint32_t level;
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len; /**< Hash key length in bytes. */
 	uint32_t queue_num; /**< Number of entries in @p queue. */
 	const uint8_t *key; /**< Hash key. */
diff --git a/lib/gso/rte_gso.c b/lib/gso/rte_gso.c
index 0d02ec3cee05..119fdcac0b7f 100644
--- a/lib/gso/rte_gso.c
+++ b/lib/gso/rte_gso.c
@@ -15,13 +15,13 @@
 #include "gso_udp4.h"
 
 #define ILLEGAL_UDP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+	((((ctx)->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0) || \
 	 (ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
 
 #define ILLEGAL_TCP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+	((((ctx)->gso_types & (RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
 		(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
 
 int
@@ -54,28 +54,28 @@ rte_gso_segment(struct rte_mbuf *pkt,
 	ol_flags = pkt->ol_flags;
 
 	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
 			((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
-			 (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+			 (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_tunnel_udp4_segment(pkt, gso_size,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_UDP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_udp4_segment(pkt, gso_size, direct_pool,
 				indirect_pool, pkts_out, nb_pkts_out);
diff --git a/lib/gso/rte_gso.h b/lib/gso/rte_gso.h
index d93ee8e5b171..0a65afc11e64 100644
--- a/lib/gso/rte_gso.h
+++ b/lib/gso/rte_gso.h
@@ -52,11 +52,11 @@ struct rte_gso_ctx {
 	uint32_t gso_types;
 	/**< the bit mask of required GSO types. The GSO library
 	 * uses the same macros as that of describing device TX
-	 * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+	 * offloading capabilities (i.e. RTE_ETH_TX_OFFLOAD_*_TSO) for
 	 * gso_types.
 	 *
 	 * For example, if applications want to segment TCP/IPv4
-	 * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+	 * packets, set RTE_ETH_TX_OFFLOAD_TCP_TSO in gso_types.
 	 */
 	uint16_t gso_size;
 	/**< maximum size of an output GSO segment, including packet
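
For illustration under the new names (the two mempools and the source
mbuf `pkt` are assumptions, and `pkt` must carry PKT_TX_TCP_SEG in its
ol_flags), a TCP/IPv4 GSO context and segmentation call could look
like:

	struct rte_mbuf *pkts_out[64];
	struct rte_gso_ctx gso_ctx = {
		.direct_pool = direct_pool,	/* assumed mbuf pool */
		.indirect_pool = indirect_pool,	/* assumed mbuf pool */
		.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO,
		.gso_size = 1400,
	};
	int ret = rte_gso_segment(pkt, &gso_ctx, pkts_out,
			RTE_DIM(pkts_out));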
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index bb38d7f58102..50e611e887bf 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -192,7 +192,7 @@ extern "C" {
  * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
  * HW capability, At minimum, the PMD should support
  * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
- * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ * if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
  */
 #define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))
 
@@ -215,7 +215,7 @@ extern "C" {
  * a) Fill outer_l2_len and outer_l3_len in mbuf.
  * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
  * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
- * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * 2) Configure RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
  */
 #define PKT_TX_OUTER_UDP_CKSUM     (1ULL << 41)
 
@@ -258,7 +258,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
  * or PKT_TX_TUNNEL_IPIP if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
@@ -271,7 +271,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
  * if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
diff --git a/lib/mbuf/rte_mbuf_dyn.h b/lib/mbuf/rte_mbuf_dyn.h
index 13f06d8ed25b..be43f8c328e1 100644
--- a/lib/mbuf/rte_mbuf_dyn.h
+++ b/lib/mbuf/rte_mbuf_dyn.h
@@ -37,7 +37,7 @@
  *   of the dynamic field to be registered:
  *   const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... };
  * - The application initializes the PMD, and asks for this feature
- *   at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in
+ *   at port initialization by passing RTE_ETH_RX_OFFLOAD_MY_FEATURE in
  *   rxconf. This will make the PMD to register the field by calling
  *   rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD
  *   stores the returned offset.
-- 
2.31.1


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v3] ethdev: add namespace
  2021-08-30 17:19   ` [dpdk-dev] [PATCH v3] " Ferruh Yigit
@ 2021-08-31  7:59     ` Thomas Monjalon
  2021-10-18 15:43     ` [dpdk-dev] [PATCH v4] " Ferruh Yigit
  1 sibling, 0 replies; 32+ messages in thread
From: Thomas Monjalon @ 2021-08-31  7:59 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Andrew Rybchenko,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Haiyue Wang,
	Beilei Xing, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko,
	Keith Wiles, Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal,
	Declan Doherty, Ray Kinsella, Radu Nicolau, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Ciara Loftus,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Matt Peters, Somalapuram Amaranath, Rasesh Mody,
	Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang,
	dev, Tyler Retzlaff, David Marchand

30/08/2021 19:19, Ferruh Yigit:
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> way. The macros for backward compatibility can be removed in next LTS.
> Also updated some struct names to have 'rte_eth' prefix.
> 
> All internal components switched to using new names.
> 
> Syntax fixed on lines that this patch touches.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> Acked-by: Jerin Jacob <jerinj@marvell.com>
> Acked-by: Wisam Jaddo <wisamm@nvidia.com>
> Acked-by: Rosen Xu <rosen.xu@intel.com>
> Acked-by: Chenbo Xia <chenbo.xia@intel.com>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
> Cc: David Marchand <david.marchand@redhat.com>
> 
> v2:
> * Updated internal components
> * Removed deprecation notice
> 
> v3:
> * Updated missing macros / structs that David highlighted
> * Added release notes update
> ---
>  app/proc-info/main.c                          |   8 +-
>  app/test-eventdev/test_perf_common.c          |   4 +-
>  app/test-eventdev/test_pipeline_common.c      |  12 +-
>  app/test-flow-perf/config.h                   |   2 +-
>  app/test-pipeline/init.c                      |   8 +-
>  app/test-pmd/cmdline.c                        | 298 +++---
>  app/test-pmd/config.c                         | 202 ++--
>  app/test-pmd/csumonly.c                       |  28 +-
>  app/test-pmd/flowgen.c                        |   6 +-
>  app/test-pmd/macfwd.c                         |   6 +-
>  app/test-pmd/macswap_common.h                 |   6 +-
>  app/test-pmd/parameters.c                     |  54 +-
>  app/test-pmd/testpmd.c                        |  60 +-
>  app/test-pmd/testpmd.h                        |   2 +-
>  app/test-pmd/txonly.c                         |   6 +-
>  app/test/test_ethdev_link.c                   |  68 +-
>  app/test/test_event_eth_rx_adapter.c          |   4 +-
>  app/test/test_kni.c                           |   2 +-
>  app/test/test_link_bonding.c                  |   4 +-
>  app/test/test_link_bonding_mode4.c            |   4 +-
>  app/test/test_link_bonding_rssconf.c          |  28 +-
>  app/test/test_pmd_perf.c                      |  12 +-
>  app/test/virtual_pmd.c                        |  10 +-
>  doc/guides/eventdevs/cnxk.rst                 |   2 +-
>  doc/guides/eventdevs/octeontx2.rst            |   2 +-
>  doc/guides/howto/debug_troubleshoot.rst       |   2 +-
>  doc/guides/nics/bnxt.rst                      |  26 +-
>  doc/guides/nics/enic.rst                      |   2 +-
>  doc/guides/nics/features.rst                  | 116 +-
>  doc/guides/nics/fm10k.rst                     |   6 +-
>  doc/guides/nics/intel_vf.rst                  |  10 +-
>  doc/guides/nics/ixgbe.rst                     |  12 +-
>  doc/guides/nics/mlx5.rst                      |   4 +-
>  doc/guides/nics/tap.rst                       |   2 +-
>  .../generic_segmentation_offload_lib.rst      |   8 +-
>  doc/guides/prog_guide/mbuf_lib.rst            |  18 +-
>  doc/guides/prog_guide/poll_mode_drv.rst       |   8 +-
>  doc/guides/prog_guide/rte_flow.rst            |  34 +-
>  doc/guides/prog_guide/rte_security.rst        |   2 +-
>  doc/guides/rel_notes/deprecation.rst          |  12 +-
>  doc/guides/rel_notes/release_21_11.rst        |   3 +
>  doc/guides/sample_app_ug/ipsec_secgw.rst      |   4 +-
>  doc/guides/testpmd_app_ug/run_app.rst         |   2 +-
>  drivers/bus/dpaa/include/process.h            |  16 +-
>  drivers/common/cnxk/roc_npc.h                 |   2 +-
>  drivers/net/af_packet/rte_eth_af_packet.c     |  16 +-
>  drivers/net/af_xdp/rte_eth_af_xdp.c           |  12 +-
>  drivers/net/ark/ark_ethdev.c                  |  16 +-
>  drivers/net/atlantic/atl_ethdev.c             |  90 +-
>  drivers/net/atlantic/atl_ethdev.h             |  18 +-
>  drivers/net/atlantic/atl_rxtx.c               |   6 +-
>  drivers/net/avp/avp_ethdev.c                  |  26 +-
>  drivers/net/axgbe/axgbe_dev.c                 |   6 +-
>  drivers/net/axgbe/axgbe_ethdev.c              | 110 +-
>  drivers/net/axgbe/axgbe_ethdev.h              |  12 +-
>  drivers/net/axgbe/axgbe_mdio.c                |   2 +-
>  drivers/net/axgbe/axgbe_rxtx.c                |   6 +-
>  drivers/net/bnx2x/bnx2x_ethdev.c              |  16 +-
>  drivers/net/bnxt/bnxt.h                       |  68 +-
>  drivers/net/bnxt/bnxt_ethdev.c                | 178 ++--
>  drivers/net/bnxt/bnxt_flow.c                  |   4 +-
>  drivers/net/bnxt/bnxt_hwrm.c                  | 112 +-
>  drivers/net/bnxt/bnxt_reps.c                  |   2 +-
>  drivers/net/bnxt/bnxt_ring.c                  |   4 +-
>  drivers/net/bnxt/bnxt_rxq.c                   |  28 +-
>  drivers/net/bnxt/bnxt_rxr.c                   |   4 +-
>  drivers/net/bnxt/bnxt_rxtx_vec_avx2.c         |   2 +-
>  drivers/net/bnxt/bnxt_rxtx_vec_common.h       |   2 +-
>  drivers/net/bnxt/bnxt_rxtx_vec_neon.c         |   2 +-
>  drivers/net/bnxt/bnxt_rxtx_vec_sse.c          |   2 +-
>  drivers/net/bnxt/bnxt_txr.c                   |   4 +-
>  drivers/net/bnxt/bnxt_vnic.c                  |  30 +-
>  drivers/net/bnxt/rte_pmd_bnxt.c               |   8 +-
>  drivers/net/bonding/eth_bond_private.h        |   4 +-
>  drivers/net/bonding/rte_eth_bond_8023ad.c     |  16 +-
>  drivers/net/bonding/rte_eth_bond_api.c        |   6 +-
>  drivers/net/bonding/rte_eth_bond_pmd.c        |  56 +-
>  drivers/net/cnxk/cn10k_ethdev.c               |  38 +-
>  drivers/net/cnxk/cn10k_rx.c                   |   4 +-
>  drivers/net/cnxk/cn10k_tx.c                   |   4 +-
>  drivers/net/cnxk/cn9k_ethdev.c                |  56 +-
>  drivers/net/cnxk/cn9k_rx.c                    |   4 +-
>  drivers/net/cnxk/cn9k_tx.c                    |   4 +-
>  drivers/net/cnxk/cnxk_ethdev.c                |  84 +-
>  drivers/net/cnxk/cnxk_ethdev.h                |  49 +-
>  drivers/net/cnxk/cnxk_ethdev_devargs.c        |   6 +-
>  drivers/net/cnxk/cnxk_ethdev_ops.c            | 112 +-
>  drivers/net/cnxk/cnxk_link.c                  |  14 +-
>  drivers/net/cnxk/cnxk_ptp.c                   |   4 +-
>  drivers/net/cnxk/cnxk_rte_flow.c              |   2 +-
>  drivers/net/cxgbe/cxgbe.h                     |  48 +-
>  drivers/net/cxgbe/cxgbe_ethdev.c              |  50 +-
>  drivers/net/cxgbe/cxgbe_main.c                |  12 +-
>  drivers/net/cxgbe/sge.c                       |   2 +-
>  drivers/net/dpaa/dpaa_ethdev.c                | 190 ++--
>  drivers/net/dpaa/dpaa_ethdev.h                |  10 +-
>  drivers/net/dpaa/dpaa_flow.c                  |  32 +-
>  drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |  34 +-
>  drivers/net/dpaa2/dpaa2_ethdev.c              | 148 +--
>  drivers/net/dpaa2/dpaa2_ethdev.h              |  12 +-
>  drivers/net/dpaa2/dpaa2_rxtx.c                |   8 +-
>  drivers/net/e1000/e1000_ethdev.h              |  18 +-
>  drivers/net/e1000/em_ethdev.c                 |  68 +-
>  drivers/net/e1000/em_rxtx.c                   |  48 +-
>  drivers/net/e1000/igb_ethdev.c                | 166 +--
>  drivers/net/e1000/igb_pf.c                    |   2 +-
>  drivers/net/e1000/igb_rxtx.c                  | 120 +--
>  drivers/net/ena/ena_ethdev.c                  |  70 +-
>  drivers/net/ena/ena_ethdev.h                  |   4 +-
>  drivers/net/ena/ena_rss.c                     |  76 +-
>  drivers/net/enetc/enetc_ethdev.c              |  38 +-
>  drivers/net/enic/enic.h                       |   2 +-
>  drivers/net/enic/enic_ethdev.c                |  88 +-
>  drivers/net/enic/enic_main.c                  |  40 +-
>  drivers/net/enic/enic_res.c                   |  52 +-
>  drivers/net/failsafe/failsafe.c               |   8 +-
>  drivers/net/failsafe/failsafe_intr.c          |   4 +-
>  drivers/net/failsafe/failsafe_ops.c           |  82 +-
>  drivers/net/fm10k/fm10k.h                     |   4 +-
>  drivers/net/fm10k/fm10k_ethdev.c              | 148 +--
>  drivers/net/fm10k/fm10k_rxtx_vec.c            |   6 +-
>  drivers/net/hinic/base/hinic_pmd_hwdev.c      |  22 +-
>  drivers/net/hinic/hinic_pmd_ethdev.c          | 142 +--
>  drivers/net/hinic/hinic_pmd_rx.c              |  36 +-
>  drivers/net/hinic/hinic_pmd_rx.h              |  22 +-
>  drivers/net/hns3/hns3_dcb.c                   |  14 +-
>  drivers/net/hns3/hns3_ethdev.c                | 360 +++----
>  drivers/net/hns3/hns3_ethdev.h                |  12 +-
>  drivers/net/hns3/hns3_ethdev_vf.c             | 108 +-
>  drivers/net/hns3/hns3_flow.c                  |   6 +-
>  drivers/net/hns3/hns3_ptp.c                   |   2 +-
>  drivers/net/hns3/hns3_rss.c                   | 108 +-
>  drivers/net/hns3/hns3_rss.h                   |  28 +-
>  drivers/net/hns3/hns3_rxtx.c                  |  30 +-
>  drivers/net/hns3/hns3_rxtx.h                  |   2 +-
>  drivers/net/hns3/hns3_rxtx_vec.c              |  10 +-
>  drivers/net/i40e/i40e_ethdev.c                | 278 ++---
>  drivers/net/i40e/i40e_ethdev.h                |  24 +-
>  drivers/net/i40e/i40e_ethdev_vf.c             | 118 +--
>  drivers/net/i40e/i40e_flow.c                  |   2 +-
>  drivers/net/i40e/i40e_hash.c                  | 156 +--
>  drivers/net/i40e/i40e_pf.c                    |  14 +-
>  drivers/net/i40e/i40e_rxtx.c                  |  10 +-
>  drivers/net/i40e/i40e_rxtx.h                  |   4 +-
>  drivers/net/i40e/i40e_rxtx_vec_avx512.c       |   2 +-
>  drivers/net/i40e/i40e_rxtx_vec_common.h       |   8 +-
>  drivers/net/i40e/i40e_vf_representor.c        |  48 +-
>  drivers/net/iavf/iavf.h                       |  24 +-
>  drivers/net/iavf/iavf_ethdev.c                | 186 ++--
>  drivers/net/iavf/iavf_hash.c                  | 300 +++---
>  drivers/net/iavf/iavf_rxtx.c                  |   2 +-
>  drivers/net/iavf/iavf_rxtx.h                  |  24 +-
>  drivers/net/iavf/iavf_rxtx_vec_avx2.c         |   4 +-
>  drivers/net/iavf/iavf_rxtx_vec_avx512.c       |   6 +-
>  drivers/net/iavf/iavf_rxtx_vec_sse.c          |   2 +-
>  drivers/net/ice/ice_dcf.c                     |   2 +-
>  drivers/net/ice/ice_dcf_ethdev.c              |  90 +-
>  drivers/net/ice/ice_dcf_vf_representor.c      |  58 +-
>  drivers/net/ice/ice_ethdev.c                  | 190 ++--
>  drivers/net/ice/ice_ethdev.h                  |  26 +-
>  drivers/net/ice/ice_hash.c                    | 268 ++---
>  drivers/net/ice/ice_rxtx.c                    |   8 +-
>  drivers/net/ice/ice_rxtx_vec_avx2.c           |   2 +-
>  drivers/net/ice/ice_rxtx_vec_avx512.c         |   4 +-
>  drivers/net/ice/ice_rxtx_vec_common.h         |  26 +-
>  drivers/net/ice/ice_rxtx_vec_sse.c            |   2 +-
>  drivers/net/igc/igc_ethdev.c                  | 146 +--
>  drivers/net/igc/igc_ethdev.h                  |  56 +-
>  drivers/net/igc/igc_txrx.c                    |  50 +-
>  drivers/net/ionic/ionic_ethdev.c              | 140 +--
>  drivers/net/ionic/ionic_ethdev.h              |  12 +-
>  drivers/net/ionic/ionic_lif.c                 |  36 +-
>  drivers/net/ionic/ionic_rxtx.c                |  10 +-
>  drivers/net/ipn3ke/ipn3ke_representor.c       |  70 +-
>  drivers/net/ixgbe/ixgbe_ethdev.c              | 313 +++---
>  drivers/net/ixgbe/ixgbe_ethdev.h              |  18 +-
>  drivers/net/ixgbe/ixgbe_fdir.c                |  24 +-
>  drivers/net/ixgbe/ixgbe_flow.c                |   2 +-
>  drivers/net/ixgbe/ixgbe_ipsec.c               |  12 +-
>  drivers/net/ixgbe/ixgbe_pf.c                  |  38 +-
>  drivers/net/ixgbe/ixgbe_rxtx.c                | 253 +++--
>  drivers/net/ixgbe/ixgbe_rxtx.h                |   4 +-
>  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |   2 +-
>  drivers/net/ixgbe/ixgbe_tm.c                  |  16 +-
>  drivers/net/ixgbe/ixgbe_vf_representor.c      |  16 +-
>  drivers/net/ixgbe/rte_pmd_ixgbe.c             |  14 +-
>  drivers/net/ixgbe/rte_pmd_ixgbe.h             |   4 +-
>  drivers/net/kni/rte_eth_kni.c                 |   8 +-
>  drivers/net/liquidio/lio_ethdev.c             | 118 +--
>  drivers/net/memif/memif_socket.c              |   2 +-
>  drivers/net/memif/rte_eth_memif.c             |  14 +-
>  drivers/net/mlx4/mlx4_ethdev.c                |  32 +-
>  drivers/net/mlx4/mlx4_flow.c                  |  30 +-
>  drivers/net/mlx4/mlx4_intr.c                  |   8 +-
>  drivers/net/mlx4/mlx4_rxq.c                   |  20 +-
>  drivers/net/mlx4/mlx4_txq.c                   |  24 +-
>  drivers/net/mlx5/linux/mlx5_ethdev_os.c       |  54 +-
>  drivers/net/mlx5/linux/mlx5_os.c              |   6 +-
>  drivers/net/mlx5/mlx5.c                       |   4 +-
>  drivers/net/mlx5/mlx5.h                       |   2 +-
>  drivers/net/mlx5/mlx5_defs.h                  |   6 +-
>  drivers/net/mlx5/mlx5_ethdev.c                |   6 +-
>  drivers/net/mlx5/mlx5_flow.c                  |  54 +-
>  drivers/net/mlx5/mlx5_flow.h                  |  12 +-
>  drivers/net/mlx5/mlx5_flow_dv.c               |  44 +-
>  drivers/net/mlx5/mlx5_flow_verbs.c            |   4 +-
>  drivers/net/mlx5/mlx5_rss.c                   |  10 +-
>  drivers/net/mlx5/mlx5_rxq.c                   |  42 +-
>  drivers/net/mlx5/mlx5_rxtx_vec.h              |   8 +-
>  drivers/net/mlx5/mlx5_tx.c                    |  30 +-
>  drivers/net/mlx5/mlx5_txq.c                   |  52 +-
>  drivers/net/mlx5/mlx5_vlan.c                  |   4 +-
>  drivers/net/mlx5/windows/mlx5_os.c            |   4 +-
>  drivers/net/mvneta/mvneta_ethdev.c            |  34 +-
>  drivers/net/mvneta/mvneta_ethdev.h            |  12 +-
>  drivers/net/mvneta/mvneta_rxtx.c              |   2 +-
>  drivers/net/mvpp2/mrvl_ethdev.c               | 116 +-
>  drivers/net/netvsc/hn_ethdev.c                |  70 +-
>  drivers/net/netvsc/hn_rndis.c                 |  50 +-
>  drivers/net/nfb/nfb_ethdev.c                  |  20 +-
>  drivers/net/nfb/nfb_rx.c                      |   2 +-
>  drivers/net/nfp/nfp_common.c                  | 130 +--
>  drivers/net/nfp/nfp_ethdev.c                  |   2 +-
>  drivers/net/nfp/nfp_ethdev_vf.c               |   2 +-
>  drivers/net/ngbe/ngbe_ethdev.c                |  50 +-
>  drivers/net/null/rte_eth_null.c               |  28 +-
>  drivers/net/octeontx/octeontx_ethdev.c        |  78 +-
>  drivers/net/octeontx/octeontx_ethdev.h        |  32 +-
>  drivers/net/octeontx/octeontx_ethdev_ops.c    |  26 +-
>  drivers/net/octeontx2/otx2_ethdev.c           |  96 +-
>  drivers/net/octeontx2/otx2_ethdev.h           |  66 +-
>  drivers/net/octeontx2/otx2_ethdev_devargs.c   |  12 +-
>  drivers/net/octeontx2/otx2_ethdev_ops.c       |  18 +-
>  drivers/net/octeontx2/otx2_ethdev_sec.c       |   8 +-
>  drivers/net/octeontx2/otx2_flow.c             |   2 +-
>  drivers/net/octeontx2/otx2_flow_ctrl.c        |  36 +-
>  drivers/net/octeontx2/otx2_flow_parse.c       |   4 +-
>  drivers/net/octeontx2/otx2_link.c             |  40 +-
>  drivers/net/octeontx2/otx2_mcast.c            |   2 +-
>  drivers/net/octeontx2/otx2_ptp.c              |   4 +-
>  drivers/net/octeontx2/otx2_rss.c              |  70 +-
>  drivers/net/octeontx2/otx2_rx.c               |   4 +-
>  drivers/net/octeontx2/otx2_tx.c               |   2 +-
>  drivers/net/octeontx2/otx2_vlan.c             |  42 +-
>  drivers/net/octeontx_ep/otx_ep_ethdev.c       |   8 +-
>  drivers/net/octeontx_ep/otx_ep_rxtx.c         |   8 +-
>  drivers/net/pcap/pcap_ethdev.c                |  12 +-
>  drivers/net/pfe/pfe_ethdev.c                  |  18 +-
>  drivers/net/qede/base/mcp_public.h            |   4 +-
>  drivers/net/qede/qede_ethdev.c                | 152 +--
>  drivers/net/qede/qede_filter.c                |  10 +-
>  drivers/net/qede/qede_rxtx.c                  |   2 +-
>  drivers/net/qede/qede_rxtx.h                  |  16 +-
>  drivers/net/ring/rte_eth_ring.c               |  20 +-
>  drivers/net/sfc/sfc.c                         |  30 +-
>  drivers/net/sfc/sfc_ef100_rx.c                |  10 +-
>  drivers/net/sfc/sfc_ef100_tx.c                |  20 +-
>  drivers/net/sfc/sfc_ef10_essb_rx.c            |   4 +-
>  drivers/net/sfc/sfc_ef10_rx.c                 |   8 +-
>  drivers/net/sfc/sfc_ef10_tx.c                 |  32 +-
>  drivers/net/sfc/sfc_ethdev.c                  |  52 +-
>  drivers/net/sfc/sfc_flow.c                    |   2 +-
>  drivers/net/sfc/sfc_port.c                    |  54 +-
>  drivers/net/sfc/sfc_rx.c                      |  52 +-
>  drivers/net/sfc/sfc_tx.c                      |  50 +-
>  drivers/net/softnic/rte_eth_softnic.c         |  12 +-
>  drivers/net/szedata2/rte_eth_szedata2.c       |  14 +-
>  drivers/net/tap/rte_eth_tap.c                 | 104 +-
>  drivers/net/tap/tap_rss.h                     |   2 +-
>  drivers/net/thunderx/nicvf_ethdev.c           | 108 +-
>  drivers/net/thunderx/nicvf_ethdev.h           |  42 +-
>  drivers/net/txgbe/txgbe_ethdev.c              | 244 ++---
>  drivers/net/txgbe/txgbe_ethdev.h              |  18 +-
>  drivers/net/txgbe/txgbe_ethdev_vf.c           |  24 +-
>  drivers/net/txgbe/txgbe_fdir.c                |  20 +-
>  drivers/net/txgbe/txgbe_flow.c                |   2 +-
>  drivers/net/txgbe/txgbe_ipsec.c               |  12 +-
>  drivers/net/txgbe/txgbe_pf.c                  |  34 +-
>  drivers/net/txgbe/txgbe_rxtx.c                | 312 +++---
>  drivers/net/txgbe/txgbe_rxtx.h                |   4 +-
>  drivers/net/txgbe/txgbe_tm.c                  |  16 +-
>  drivers/net/vhost/rte_eth_vhost.c             |  16 +-
>  drivers/net/virtio/virtio_ethdev.c            | 126 +--
>  drivers/net/vmxnet3/vmxnet3_ethdev.c          |  74 +-
>  drivers/net/vmxnet3/vmxnet3_ethdev.h          |  16 +-
>  drivers/net/vmxnet3/vmxnet3_rxtx.c            |  16 +-
>  examples/bbdev_app/main.c                     |   6 +-
>  examples/bond/main.c                          |  14 +-
>  examples/distributor/main.c                   |  12 +-
>  examples/ethtool/ethtool-app/main.c           |   2 +-
>  examples/ethtool/lib/rte_ethtool.c            |  18 +-
>  .../pipeline_worker_generic.c                 |  16 +-
>  .../eventdev_pipeline/pipeline_worker_tx.c    |  12 +-
>  examples/flow_classify/flow_classify.c        |   4 +-
>  examples/flow_filtering/main.c                |  16 +-
>  examples/ioat/ioatfwd.c                       |   8 +-
>  examples/ip_fragmentation/main.c              |  14 +-
>  examples/ip_pipeline/link.c                   |  20 +-
>  examples/ip_reassembly/main.c                 |  20 +-
>  examples/ipsec-secgw/ipsec-secgw.c            |  34 +-
>  examples/ipsec-secgw/sa.c                     |   8 +-
>  examples/ipv4_multicast/main.c                |   8 +-
>  examples/kni/main.c                           |  12 +-
>  examples/l2fwd-crypto/main.c                  |  10 +-
>  examples/l2fwd-event/l2fwd_common.c           |  10 +-
>  examples/l2fwd-event/main.c                   |   2 +-
>  examples/l2fwd-jobstats/main.c                |   8 +-
>  examples/l2fwd-keepalive/main.c               |   8 +-
>  examples/l2fwd/main.c                         |   8 +-
>  examples/l3fwd-acl/main.c                     |  20 +-
>  examples/l3fwd-graph/main.c                   |  16 +-
>  examples/l3fwd-power/main.c                   |  18 +-
>  examples/l3fwd/l3fwd_event.c                  |   4 +-
>  examples/l3fwd/main.c                         |  20 +-
>  examples/link_status_interrupt/main.c         |  10 +-
>  .../client_server_mp/mp_server/init.c         |   4 +-
>  examples/multi_process/symmetric_mp/main.c    |  14 +-
>  examples/ntb/ntb_fwd.c                        |   6 +-
>  examples/packet_ordering/main.c               |   4 +-
>  .../performance-thread/l3fwd-thread/main.c    |  18 +-
>  examples/pipeline/obj.c                       |  20 +-
>  examples/ptpclient/ptpclient.c                |  10 +-
>  examples/qos_meter/main.c                     |  16 +-
>  examples/qos_sched/init.c                     |   6 +-
>  examples/rxtx_callbacks/main.c                |   8 +-
>  examples/server_node_efd/server/init.c        |   8 +-
>  examples/skeleton/basicfwd.c                  |   4 +-
>  examples/vhost/main.c                         |  28 +-
>  examples/vm_power_manager/main.c              |   6 +-
>  examples/vmdq/main.c                          |  20 +-
>  examples/vmdq_dcb/main.c                      |  40 +-
>  lib/ethdev/rte_ethdev.c                       | 193 ++--
>  lib/ethdev/rte_ethdev.h                       | 997 +++++++++++-------
>  lib/ethdev/rte_ethdev_core.h                  |   2 +-
>  lib/ethdev/rte_flow.h                         |   2 +-
>  lib/gso/rte_gso.c                             |  20 +-
>  lib/gso/rte_gso.h                             |   4 +-
>  lib/mbuf/rte_mbuf_core.h                      |   8 +-
>  lib/mbuf/rte_mbuf_dyn.h                       |   2 +-
>  339 files changed, 6728 insertions(+), 6500 deletions(-)

I would love to have a script to help with such a mass change.
The two usages of scripting would be:
1/ convert to old names for backport of future patches
2/ convert applications to new names

For the first usage, we could update this script:
	devtools/update-patches.py

For the second usage, a Coccinelle script could be added to devtools/cocci/.
Either way the conversion is mechanical, as the sketch below shows.
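
A minimal before/after sketch in C of what the conversion means for an
application (illustrative only; the macro names used here all appear in
the patch):

	#include <rte_ethdev.h>

	/* Before: pre-21.11 names, kept working through compatibility macros. */
	static struct rte_eth_conf port_conf_old = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf = { .rss_conf = { .rss_hf = ETH_RSS_IP } },
		.txmode = { .mq_mode = ETH_MQ_TX_NONE },
	};

	/* After: the same configuration with the new RTE_ETH namespace. */
	static struct rte_eth_conf port_conf_new = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = { .rss_conf = { .rss_hf = RTE_ETH_RSS_IP } },
		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
	};

A script only needs a table of old-to-new names to rewrite such
initializers.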



^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4] ethdev: add namespace
  2021-08-30 17:19   ` [dpdk-dev] [PATCH v3] " Ferruh Yigit
  2021-08-31  7:59     ` Thomas Monjalon
@ 2021-10-18 15:43     ` Ferruh Yigit
  2021-10-20 19:23       ` [dpdk-dev] [PATCH v5] " Ferruh Yigit
  1 sibling, 1 reply; 32+ messages in thread
From: Ferruh Yigit @ 2021-10-18 15:43 UTC (permalink / raw)
  To: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Somalapuram Amaranath, Rasesh Mody, Shahed Shaikh,
	Bruce Richardson, Konstantin Ananyev, Ruifeng Wang,
	Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk, Shai Brandes,
	Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh, Gaetan Rivet,
	Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou,
	Jingjing Wu, Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang
  Cc: Ferruh Yigit, dev, Tyler Retzlaff, David Marchand


Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
way. The macros for backward compatibility can be removed in the next LTS.
Also updated some struct names to have the 'rte_eth' prefix.

All internal components switched to using new names.

Syntax fixed on lines that this patch touches.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Wisam Jaddo <wisamm@nvidia.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
Cc: David Marchand <david.marchand@redhat.com>
Cc: Thomas Monjalon <thomas@monjalon.net>

v2:
* Updated internal components
* Removed deprecation notice

v3:
* Updated missing macros / structs that David highlighted
* Added release notes update

v4:
* rebased on latest next-net
* depends on https://patches.dpdk.org/user/todo/dpdk/?series=19744
* Not able to complete the scripts to update user code, although Aman
  shared some:
  https://patches.dpdk.org/project/dpdk/patch/20211008102949.70716-1-aman.deep.singh@intel.com/
  Sending a new version as a possible option to get this patch into -rc1,
  with the scripts to follow later, before the release.
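
As background on the "backward compatible way" mentioned in the commit
message: the compatibility layer is a set of plain aliases from the old
names to the new ones. A minimal sketch (the full macro list lives in the
ethdev headers touched by the patch; only a few representative entries are
shown here):

	/* Old names stay valid as aliases until removed in a later LTS. */
	#define ETH_MQ_RX_RSS           RTE_ETH_MQ_RX_RSS
	#define ETH_RSS_IP              RTE_ETH_RSS_IP
	#define DEV_RX_OFFLOAD_RSS_HASH RTE_ETH_RX_OFFLOAD_RSS_HASH
	#define DEV_TX_OFFLOAD_TCP_TSO  RTE_ETH_TX_OFFLOAD_TCP_TSO

Applications therefore compile unchanged against 21.11 and can migrate to
the new names at their own pace before the aliases disappear.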
---
 app/proc-info/main.c                          |    8 +-
 app/test-eventdev/test_perf_common.c          |    4 +-
 app/test-eventdev/test_pipeline_common.c      |   10 +-
 app/test-flow-perf/config.h                   |    2 +-
 app/test-pipeline/init.c                      |    8 +-
 app/test-pmd/cmdline.c                        |  286 ++---
 app/test-pmd/config.c                         |  200 ++--
 app/test-pmd/csumonly.c                       |   28 +-
 app/test-pmd/flowgen.c                        |    6 +-
 app/test-pmd/macfwd.c                         |    6 +-
 app/test-pmd/macswap_common.h                 |    6 +-
 app/test-pmd/parameters.c                     |   54 +-
 app/test-pmd/testpmd.c                        |   52 +-
 app/test-pmd/testpmd.h                        |    2 +-
 app/test-pmd/txonly.c                         |    6 +-
 app/test/test_ethdev_link.c                   |   68 +-
 app/test/test_event_eth_rx_adapter.c          |    4 +-
 app/test/test_kni.c                           |    2 +-
 app/test/test_link_bonding.c                  |    4 +-
 app/test/test_link_bonding_mode4.c            |    4 +-
 app/test/test_link_bonding_rssconf.c          |   28 +-
 app/test/test_pmd_perf.c                      |   12 +-
 app/test/virtual_pmd.c                        |   10 +-
 doc/guides/eventdevs/cnxk.rst                 |    2 +-
 doc/guides/eventdevs/octeontx2.rst            |    2 +-
 doc/guides/nics/af_packet.rst                 |    2 +-
 doc/guides/nics/bnxt.rst                      |   24 +-
 doc/guides/nics/enic.rst                      |    2 +-
 doc/guides/nics/features.rst                  |  114 +-
 doc/guides/nics/fm10k.rst                     |    6 +-
 doc/guides/nics/intel_vf.rst                  |   10 +-
 doc/guides/nics/ixgbe.rst                     |   12 +-
 doc/guides/nics/mlx5.rst                      |    4 +-
 doc/guides/nics/tap.rst                       |    2 +-
 .../generic_segmentation_offload_lib.rst      |    8 +-
 doc/guides/prog_guide/mbuf_lib.rst            |   18 +-
 doc/guides/prog_guide/poll_mode_drv.rst       |    8 +-
 doc/guides/prog_guide/rte_flow.rst            |   34 +-
 doc/guides/prog_guide/rte_security.rst        |    2 +-
 doc/guides/rel_notes/deprecation.rst          |   10 +-
 doc/guides/rel_notes/release_21_11.rst        |    3 +
 doc/guides/sample_app_ug/ipsec_secgw.rst      |    4 +-
 doc/guides/testpmd_app_ug/run_app.rst         |    2 +-
 drivers/bus/dpaa/include/process.h            |   16 +-
 drivers/common/cnxk/roc_npc.h                 |    2 +-
 drivers/net/af_packet/rte_eth_af_packet.c     |   20 +-
 drivers/net/af_xdp/rte_eth_af_xdp.c           |   12 +-
 drivers/net/ark/ark_ethdev.c                  |   16 +-
 drivers/net/atlantic/atl_ethdev.c             |   88 +-
 drivers/net/atlantic/atl_ethdev.h             |   18 +-
 drivers/net/atlantic/atl_rxtx.c               |    6 +-
 drivers/net/avp/avp_ethdev.c                  |   26 +-
 drivers/net/axgbe/axgbe_dev.c                 |    6 +-
 drivers/net/axgbe/axgbe_ethdev.c              |  104 +-
 drivers/net/axgbe/axgbe_ethdev.h              |   12 +-
 drivers/net/axgbe/axgbe_mdio.c                |    2 +-
 drivers/net/axgbe/axgbe_rxtx.c                |    6 +-
 drivers/net/bnx2x/bnx2x_ethdev.c              |   12 +-
 drivers/net/bnxt/bnxt.h                       |   62 +-
 drivers/net/bnxt/bnxt_ethdev.c                |  172 +--
 drivers/net/bnxt/bnxt_flow.c                  |    6 +-
 drivers/net/bnxt/bnxt_hwrm.c                  |  112 +-
 drivers/net/bnxt/bnxt_reps.c                  |    2 +-
 drivers/net/bnxt/bnxt_ring.c                  |    4 +-
 drivers/net/bnxt/bnxt_rxq.c                   |   28 +-
 drivers/net/bnxt/bnxt_rxr.c                   |    4 +-
 drivers/net/bnxt/bnxt_rxtx_vec_avx2.c         |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_common.h       |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c         |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c          |    2 +-
 drivers/net/bnxt/bnxt_txr.c                   |    4 +-
 drivers/net/bnxt/bnxt_vnic.c                  |   30 +-
 drivers/net/bnxt/rte_pmd_bnxt.c               |    8 +-
 drivers/net/bonding/eth_bond_private.h        |    4 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c     |   16 +-
 drivers/net/bonding/rte_eth_bond_api.c        |    6 +-
 drivers/net/bonding/rte_eth_bond_pmd.c        |   50 +-
 drivers/net/cnxk/cn10k_ethdev.c               |   42 +-
 drivers/net/cnxk/cn10k_rx.c                   |    4 +-
 drivers/net/cnxk/cn10k_tx.c                   |    4 +-
 drivers/net/cnxk/cn9k_ethdev.c                |   60 +-
 drivers/net/cnxk/cn9k_rx.c                    |    4 +-
 drivers/net/cnxk/cn9k_tx.c                    |    4 +-
 drivers/net/cnxk/cnxk_ethdev.c                |  112 +-
 drivers/net/cnxk/cnxk_ethdev.h                |   49 +-
 drivers/net/cnxk/cnxk_ethdev_devargs.c        |    6 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c            |  106 +-
 drivers/net/cnxk/cnxk_link.c                  |   14 +-
 drivers/net/cnxk/cnxk_ptp.c                   |    4 +-
 drivers/net/cnxk/cnxk_rte_flow.c              |    2 +-
 drivers/net/cxgbe/cxgbe.h                     |   46 +-
 drivers/net/cxgbe/cxgbe_ethdev.c              |   42 +-
 drivers/net/cxgbe/cxgbe_main.c                |   12 +-
 drivers/net/dpaa/dpaa_ethdev.c                |  180 +--
 drivers/net/dpaa/dpaa_ethdev.h                |   10 +-
 drivers/net/dpaa/dpaa_flow.c                  |   32 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |   47 +-
 drivers/net/dpaa2/dpaa2_ethdev.c              |  138 +--
 drivers/net/dpaa2/dpaa2_ethdev.h              |   22 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |    8 +-
 drivers/net/e1000/e1000_ethdev.h              |   18 +-
 drivers/net/e1000/em_ethdev.c                 |   64 +-
 drivers/net/e1000/em_rxtx.c                   |   38 +-
 drivers/net/e1000/igb_ethdev.c                |  158 +--
 drivers/net/e1000/igb_pf.c                    |    2 +-
 drivers/net/e1000/igb_rxtx.c                  |  116 +-
 drivers/net/ena/ena_ethdev.c                  |   66 +-
 drivers/net/ena/ena_ethdev.h                  |    4 +-
 drivers/net/ena/ena_rss.c                     |   74 +-
 drivers/net/enetc/enetc_ethdev.c              |   30 +-
 drivers/net/enic/enic.h                       |    2 +-
 drivers/net/enic/enic_ethdev.c                |   88 +-
 drivers/net/enic/enic_main.c                  |   40 +-
 drivers/net/enic/enic_res.c                   |   50 +-
 drivers/net/failsafe/failsafe.c               |    8 +-
 drivers/net/failsafe/failsafe_intr.c          |    4 +-
 drivers/net/failsafe/failsafe_ops.c           |   78 +-
 drivers/net/fm10k/fm10k.h                     |    4 +-
 drivers/net/fm10k/fm10k_ethdev.c              |  146 +--
 drivers/net/fm10k/fm10k_rxtx_vec.c            |    6 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c      |   22 +-
 drivers/net/hinic/hinic_pmd_ethdev.c          |  136 +--
 drivers/net/hinic/hinic_pmd_rx.c              |   36 +-
 drivers/net/hinic/hinic_pmd_rx.h              |   22 +-
 drivers/net/hns3/hns3_dcb.c                   |   14 +-
 drivers/net/hns3/hns3_ethdev.c                |  352 +++---
 drivers/net/hns3/hns3_ethdev.h                |   12 +-
 drivers/net/hns3/hns3_ethdev_vf.c             |  100 +-
 drivers/net/hns3/hns3_flow.c                  |    6 +-
 drivers/net/hns3/hns3_ptp.c                   |    2 +-
 drivers/net/hns3/hns3_rss.c                   |  108 +-
 drivers/net/hns3/hns3_rss.h                   |   28 +-
 drivers/net/hns3/hns3_rxtx.c                  |   30 +-
 drivers/net/hns3/hns3_rxtx.h                  |    2 +-
 drivers/net/hns3/hns3_rxtx_vec.c              |   10 +-
 drivers/net/i40e/i40e_ethdev.c                |  272 ++---
 drivers/net/i40e/i40e_ethdev.h                |   24 +-
 drivers/net/i40e/i40e_flow.c                  |   32 +-
 drivers/net/i40e/i40e_hash.c                  |  156 +--
 drivers/net/i40e/i40e_pf.c                    |   14 +-
 drivers/net/i40e/i40e_rxtx.c                  |    8 +-
 drivers/net/i40e/i40e_rxtx.h                  |    4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c       |    2 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h       |    8 +-
 drivers/net/i40e/i40e_vf_representor.c        |   48 +-
 drivers/net/iavf/iavf.h                       |   24 +-
 drivers/net/iavf/iavf_ethdev.c                |  178 +--
 drivers/net/iavf/iavf_hash.c                  |  320 ++---
 drivers/net/iavf/iavf_rxtx.c                  |    2 +-
 drivers/net/iavf/iavf_rxtx.h                  |   24 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c         |    4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c       |    6 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |    2 +-
 drivers/net/ice/ice_dcf.c                     |    2 +-
 drivers/net/ice/ice_dcf_ethdev.c              |   86 +-
 drivers/net/ice/ice_dcf_vf_representor.c      |   56 +-
 drivers/net/ice/ice_ethdev.c                  |  180 +--
 drivers/net/ice/ice_ethdev.h                  |   26 +-
 drivers/net/ice/ice_hash.c                    |  290 ++---
 drivers/net/ice/ice_rxtx.c                    |   16 +-
 drivers/net/ice/ice_rxtx_vec_avx2.c           |    2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c         |    4 +-
 drivers/net/ice/ice_rxtx_vec_common.h         |   28 +-
 drivers/net/ice/ice_rxtx_vec_sse.c            |    2 +-
 drivers/net/igc/igc_ethdev.c                  |  138 +--
 drivers/net/igc/igc_ethdev.h                  |   54 +-
 drivers/net/igc/igc_txrx.c                    |   48 +-
 drivers/net/ionic/ionic_ethdev.c              |  138 +--
 drivers/net/ionic/ionic_ethdev.h              |   12 +-
 drivers/net/ionic/ionic_lif.c                 |   36 +-
 drivers/net/ionic/ionic_rxtx.c                |   10 +-
 drivers/net/ipn3ke/ipn3ke_representor.c       |   64 +-
 drivers/net/ixgbe/ixgbe_ethdev.c              |  285 +++--
 drivers/net/ixgbe/ixgbe_ethdev.h              |   18 +-
 drivers/net/ixgbe/ixgbe_fdir.c                |   24 +-
 drivers/net/ixgbe/ixgbe_flow.c                |    2 +-
 drivers/net/ixgbe/ixgbe_ipsec.c               |   12 +-
 drivers/net/ixgbe/ixgbe_pf.c                  |   34 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                |  249 ++--
 drivers/net/ixgbe/ixgbe_rxtx.h                |    4 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |    2 +-
 drivers/net/ixgbe/ixgbe_tm.c                  |   16 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c      |   16 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.c             |   14 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.h             |    4 +-
 drivers/net/kni/rte_eth_kni.c                 |    8 +-
 drivers/net/liquidio/lio_ethdev.c             |  114 +-
 drivers/net/memif/memif_socket.c              |    2 +-
 drivers/net/memif/rte_eth_memif.c             |   16 +-
 drivers/net/mlx4/mlx4_ethdev.c                |   32 +-
 drivers/net/mlx4/mlx4_flow.c                  |   30 +-
 drivers/net/mlx4/mlx4_intr.c                  |    8 +-
 drivers/net/mlx4/mlx4_rxq.c                   |   18 +-
 drivers/net/mlx4/mlx4_txq.c                   |   24 +-
 drivers/net/mlx5/linux/mlx5_ethdev_os.c       |   54 +-
 drivers/net/mlx5/linux/mlx5_os.c              |    6 +-
 drivers/net/mlx5/mlx5.c                       |    4 +-
 drivers/net/mlx5/mlx5.h                       |    2 +-
 drivers/net/mlx5/mlx5_defs.h                  |    6 +-
 drivers/net/mlx5/mlx5_ethdev.c                |    6 +-
 drivers/net/mlx5/mlx5_flow.c                  |   54 +-
 drivers/net/mlx5/mlx5_flow.h                  |   12 +-
 drivers/net/mlx5/mlx5_flow_dv.c               |   44 +-
 drivers/net/mlx5/mlx5_flow_verbs.c            |    4 +-
 drivers/net/mlx5/mlx5_rss.c                   |   10 +-
 drivers/net/mlx5/mlx5_rxq.c                   |   40 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h              |    8 +-
 drivers/net/mlx5/mlx5_tx.c                    |   30 +-
 drivers/net/mlx5/mlx5_txq.c                   |   58 +-
 drivers/net/mlx5/mlx5_vlan.c                  |    4 +-
 drivers/net/mlx5/windows/mlx5_os.c            |    4 +-
 drivers/net/mvneta/mvneta_ethdev.c            |   32 +-
 drivers/net/mvneta/mvneta_ethdev.h            |   10 +-
 drivers/net/mvneta/mvneta_rxtx.c              |    2 +-
 drivers/net/mvpp2/mrvl_ethdev.c               |  112 +-
 drivers/net/netvsc/hn_ethdev.c                |   70 +-
 drivers/net/netvsc/hn_rndis.c                 |   50 +-
 drivers/net/nfb/nfb_ethdev.c                  |   20 +-
 drivers/net/nfb/nfb_rx.c                      |    2 +-
 drivers/net/nfp/nfp_common.c                  |  122 +-
 drivers/net/nfp/nfp_ethdev.c                  |    2 +-
 drivers/net/nfp/nfp_ethdev_vf.c               |    2 +-
 drivers/net/ngbe/ngbe_ethdev.c                |   50 +-
 drivers/net/null/rte_eth_null.c               |   28 +-
 drivers/net/octeontx/octeontx_ethdev.c        |   74 +-
 drivers/net/octeontx/octeontx_ethdev.h        |   30 +-
 drivers/net/octeontx/octeontx_ethdev_ops.c    |   26 +-
 drivers/net/octeontx2/otx2_ethdev.c           |   96 +-
 drivers/net/octeontx2/otx2_ethdev.h           |   64 +-
 drivers/net/octeontx2/otx2_ethdev_devargs.c   |   12 +-
 drivers/net/octeontx2/otx2_ethdev_ops.c       |   14 +-
 drivers/net/octeontx2/otx2_ethdev_sec.c       |    8 +-
 drivers/net/octeontx2/otx2_flow.c             |    2 +-
 drivers/net/octeontx2/otx2_flow_ctrl.c        |   36 +-
 drivers/net/octeontx2/otx2_flow_parse.c       |    4 +-
 drivers/net/octeontx2/otx2_link.c             |   40 +-
 drivers/net/octeontx2/otx2_mcast.c            |    2 +-
 drivers/net/octeontx2/otx2_ptp.c              |    4 +-
 drivers/net/octeontx2/otx2_rss.c              |   70 +-
 drivers/net/octeontx2/otx2_rx.c               |    4 +-
 drivers/net/octeontx2/otx2_tx.c               |    2 +-
 drivers/net/octeontx2/otx2_vlan.c             |   42 +-
 drivers/net/octeontx_ep/otx_ep_ethdev.c       |    6 +-
 drivers/net/octeontx_ep/otx_ep_rxtx.c         |    6 +-
 drivers/net/pcap/pcap_ethdev.c                |   12 +-
 drivers/net/pfe/pfe_ethdev.c                  |   18 +-
 drivers/net/qede/base/mcp_public.h            |    4 +-
 drivers/net/qede/qede_ethdev.c                |  156 +--
 drivers/net/qede/qede_filter.c                |   42 +-
 drivers/net/qede/qede_rxtx.c                  |    2 +-
 drivers/net/qede/qede_rxtx.h                  |   16 +-
 drivers/net/ring/rte_eth_ring.c               |   20 +-
 drivers/net/sfc/sfc.c                         |   30 +-
 drivers/net/sfc/sfc_ef100_rx.c                |   10 +-
 drivers/net/sfc/sfc_ef100_tx.c                |   20 +-
 drivers/net/sfc/sfc_ef10_essb_rx.c            |    4 +-
 drivers/net/sfc/sfc_ef10_rx.c                 |    8 +-
 drivers/net/sfc/sfc_ef10_tx.c                 |   32 +-
 drivers/net/sfc/sfc_ethdev.c                  |   50 +-
 drivers/net/sfc/sfc_flow.c                    |    2 +-
 drivers/net/sfc/sfc_port.c                    |   52 +-
 drivers/net/sfc/sfc_repr.c                    |   10 +-
 drivers/net/sfc/sfc_rx.c                      |   50 +-
 drivers/net/sfc/sfc_tx.c                      |   50 +-
 drivers/net/softnic/rte_eth_softnic.c         |   12 +-
 drivers/net/szedata2/rte_eth_szedata2.c       |   14 +-
 drivers/net/tap/rte_eth_tap.c                 |  104 +-
 drivers/net/tap/tap_rss.h                     |    2 +-
 drivers/net/thunderx/nicvf_ethdev.c           |  102 +-
 drivers/net/thunderx/nicvf_ethdev.h           |   40 +-
 drivers/net/txgbe/txgbe_ethdev.c              |  242 ++--
 drivers/net/txgbe/txgbe_ethdev.h              |   18 +-
 drivers/net/txgbe/txgbe_ethdev_vf.c           |   24 +-
 drivers/net/txgbe/txgbe_fdir.c                |   20 +-
 drivers/net/txgbe/txgbe_flow.c                |    2 +-
 drivers/net/txgbe/txgbe_ipsec.c               |   12 +-
 drivers/net/txgbe/txgbe_pf.c                  |   34 +-
 drivers/net/txgbe/txgbe_rxtx.c                |  308 ++---
 drivers/net/txgbe/txgbe_rxtx.h                |    4 +-
 drivers/net/txgbe/txgbe_tm.c                  |   16 +-
 drivers/net/vhost/rte_eth_vhost.c             |   16 +-
 drivers/net/virtio/virtio_ethdev.c            |  124 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c          |   72 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h          |   16 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c            |   16 +-
 examples/bbdev_app/main.c                     |    6 +-
 examples/bond/main.c                          |   14 +-
 examples/distributor/main.c                   |   12 +-
 examples/ethtool/ethtool-app/main.c           |    2 +-
 examples/ethtool/lib/rte_ethtool.c            |   18 +-
 .../pipeline_worker_generic.c                 |   16 +-
 .../eventdev_pipeline/pipeline_worker_tx.c    |   12 +-
 examples/flow_classify/flow_classify.c        |    4 +-
 examples/flow_filtering/main.c                |   16 +-
 examples/ioat/ioatfwd.c                       |    8 +-
 examples/ip_fragmentation/main.c              |   12 +-
 examples/ip_pipeline/link.c                   |   20 +-
 examples/ip_reassembly/main.c                 |   18 +-
 examples/ipsec-secgw/ipsec-secgw.c            |   32 +-
 examples/ipsec-secgw/sa.c                     |    8 +-
 examples/ipv4_multicast/main.c                |    6 +-
 examples/kni/main.c                           |    8 +-
 examples/l2fwd-crypto/main.c                  |   10 +-
 examples/l2fwd-event/l2fwd_common.c           |   10 +-
 examples/l2fwd-event/main.c                   |    2 +-
 examples/l2fwd-jobstats/main.c                |    8 +-
 examples/l2fwd-keepalive/main.c               |    8 +-
 examples/l2fwd/main.c                         |    8 +-
 examples/l3fwd-acl/main.c                     |   18 +-
 examples/l3fwd-graph/main.c                   |   14 +-
 examples/l3fwd-power/main.c                   |   16 +-
 examples/l3fwd/l3fwd_event.c                  |    4 +-
 examples/l3fwd/main.c                         |   18 +-
 examples/link_status_interrupt/main.c         |   10 +-
 .../client_server_mp/mp_server/init.c         |    4 +-
 examples/multi_process/symmetric_mp/main.c    |   14 +-
 examples/ntb/ntb_fwd.c                        |    6 +-
 examples/packet_ordering/main.c               |    4 +-
 .../performance-thread/l3fwd-thread/main.c    |   16 +-
 examples/pipeline/obj.c                       |   20 +-
 examples/ptpclient/ptpclient.c                |   10 +-
 examples/qos_meter/main.c                     |   16 +-
 examples/qos_sched/init.c                     |    6 +-
 examples/rxtx_callbacks/main.c                |    8 +-
 examples/server_node_efd/server/init.c        |    8 +-
 examples/skeleton/basicfwd.c                  |    4 +-
 examples/vhost/main.c                         |   26 +-
 examples/vm_power_manager/main.c              |    6 +-
 examples/vmdq/main.c                          |   20 +-
 examples/vmdq_dcb/main.c                      |   40 +-
 lib/ethdev/ethdev_driver.h                    |   36 +-
 lib/ethdev/rte_ethdev.c                       |  181 ++-
 lib/ethdev/rte_ethdev.h                       | 1029 +++++++++++------
 lib/ethdev/rte_flow.h                         |    2 +-
 lib/gso/rte_gso.c                             |   20 +-
 lib/gso/rte_gso.h                             |    4 +-
 lib/mbuf/rte_mbuf_core.h                      |    8 +-
 lib/mbuf/rte_mbuf_dyn.h                       |    2 +-
 338 files changed, 6639 insertions(+), 6382 deletions(-)

diff --git a/app/proc-info/main.c b/app/proc-info/main.c
index a8e928fa9ff3..963b6aa5c589 100644
--- a/app/proc-info/main.c
+++ b/app/proc-info/main.c
@@ -757,11 +757,11 @@ show_port(void)
 		}
 
 		ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
-		if (ret == 0 && fc_conf.mode != RTE_FC_NONE)  {
+		if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE)  {
 			printf("\t  -- flow control mode %s%s high %u low %u pause %u%s%s\n",
-			       fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
-			       fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
-			       fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+			       fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+			       fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+			       fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
 			       fc_conf.autoneg ? " auto" : "",
 			       fc_conf.high_water,
 			       fc_conf.low_water,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 660d5a0364b6..31d1b0e14653 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -668,13 +668,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct test_perf *t = evt_test_priv(test);
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.split_hdr_size = 0,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 2775e72c580d..d202091077a6 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
@@ -223,7 +223,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 			local_port_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_RSS_HASH;
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 		ret = rte_eth_dev_info_get(i, &dev_info);
 		if (ret != 0) {
@@ -233,9 +233,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 
 		/* Enable mbuf fast free if PMD has the capability. */
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h
index a14d4e05e185..4249b6175b82 100644
--- a/app/test-flow-perf/config.h
+++ b/app/test-flow-perf/config.h
@@ -5,7 +5,7 @@
 #define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
 
 /* Configuration */
 #define RXQ_NUM 4
diff --git a/app/test-pipeline/init.c b/app/test-pipeline/init.c
index fe37d63730c6..c73801904103 100644
--- a/app/test-pipeline/init.c
+++ b/app/test-pipeline/init.c
@@ -70,16 +70,16 @@ struct app_params app = {
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -178,7 +178,7 @@ app_ports_check_link(void)
 		RTE_LOG(INFO, USER1, "Port %u %s\n",
 			port,
 			link_status_text);
-		if (link.link_status == ETH_LINK_DOWN)
+		if (link.link_status == RTE_ETH_LINK_DOWN)
 			all_ports_up = 0;
 	}
 
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 88354ccab9d4..02011f668034 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1478,51 +1478,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
 	int duplex;
 
 	if (!strcmp(duplexstr, "half")) {
-		duplex = ETH_LINK_HALF_DUPLEX;
+		duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	} else if (!strcmp(duplexstr, "full")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else if (!strcmp(duplexstr, "auto")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		fprintf(stderr, "Unknown duplex parameter\n");
 		return -1;
 	}
 
 	if (!strcmp(speedstr, "10")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
 	} else if (!strcmp(speedstr, "100")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
 	} else {
-		if (duplex != ETH_LINK_FULL_DUPLEX) {
+		if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
 			fprintf(stderr, "Invalid speed/duplex parameters\n");
 			return -1;
 		}
 		if (!strcmp(speedstr, "1000")) {
-			*speed = ETH_LINK_SPEED_1G;
+			*speed = RTE_ETH_LINK_SPEED_1G;
 		} else if (!strcmp(speedstr, "10000")) {
-			*speed = ETH_LINK_SPEED_10G;
+			*speed = RTE_ETH_LINK_SPEED_10G;
 		} else if (!strcmp(speedstr, "25000")) {
-			*speed = ETH_LINK_SPEED_25G;
+			*speed = RTE_ETH_LINK_SPEED_25G;
 		} else if (!strcmp(speedstr, "40000")) {
-			*speed = ETH_LINK_SPEED_40G;
+			*speed = RTE_ETH_LINK_SPEED_40G;
 		} else if (!strcmp(speedstr, "50000")) {
-			*speed = ETH_LINK_SPEED_50G;
+			*speed = RTE_ETH_LINK_SPEED_50G;
 		} else if (!strcmp(speedstr, "100000")) {
-			*speed = ETH_LINK_SPEED_100G;
+			*speed = RTE_ETH_LINK_SPEED_100G;
 		} else if (!strcmp(speedstr, "200000")) {
-			*speed = ETH_LINK_SPEED_200G;
+			*speed = RTE_ETH_LINK_SPEED_200G;
 		} else if (!strcmp(speedstr, "auto")) {
-			*speed = ETH_LINK_SPEED_AUTONEG;
+			*speed = RTE_ETH_LINK_SPEED_AUTONEG;
 		} else {
 			fprintf(stderr, "Unknown speed parameter\n");
 			return -1;
 		}
 	}
 
-	if (*speed != ETH_LINK_SPEED_AUTONEG)
-		*speed |= ETH_LINK_SPEED_FIXED;
+	if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+		*speed |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return 0;
 }
@@ -2166,33 +2166,33 @@ cmd_config_rss_parsed(void *parsed_result,
 	int ret;
 
 	if (!strcmp(res->value, "all"))
-		rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
-			ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
-			ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
-			ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
-			ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+			RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+			RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+			RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+			RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "eth"))
-		rss_conf.rss_hf = ETH_RSS_ETH;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH;
 	else if (!strcmp(res->value, "vlan"))
-		rss_conf.rss_hf = ETH_RSS_VLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
 	else if (!strcmp(res->value, "ip"))
-		rss_conf.rss_hf = ETH_RSS_IP;
+		rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	else if (!strcmp(res->value, "udp"))
-		rss_conf.rss_hf = ETH_RSS_UDP;
+		rss_conf.rss_hf = RTE_ETH_RSS_UDP;
 	else if (!strcmp(res->value, "tcp"))
-		rss_conf.rss_hf = ETH_RSS_TCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_TCP;
 	else if (!strcmp(res->value, "sctp"))
-		rss_conf.rss_hf = ETH_RSS_SCTP;
+		rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
 	else if (!strcmp(res->value, "ether"))
-		rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
 	else if (!strcmp(res->value, "port"))
-		rss_conf.rss_hf = ETH_RSS_PORT;
+		rss_conf.rss_hf = RTE_ETH_RSS_PORT;
 	else if (!strcmp(res->value, "vxlan"))
-		rss_conf.rss_hf = ETH_RSS_VXLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
 	else if (!strcmp(res->value, "geneve"))
-		rss_conf.rss_hf = ETH_RSS_GENEVE;
+		rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
 	else if (!strcmp(res->value, "nvgre"))
-		rss_conf.rss_hf = ETH_RSS_NVGRE;
+		rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
 	else if (!strcmp(res->value, "l3-pre32"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
 	else if (!strcmp(res->value, "l3-pre40"))
@@ -2206,46 +2206,46 @@ cmd_config_rss_parsed(void *parsed_result,
 	else if (!strcmp(res->value, "l3-pre96"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
 	else if (!strcmp(res->value, "l3-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
 	else if (!strcmp(res->value, "l3-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
 	else if (!strcmp(res->value, "l4-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
 	else if (!strcmp(res->value, "l2-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
 	else if (!strcmp(res->value, "l2-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
 	else if (!strcmp(res->value, "l2tpv3"))
-		rss_conf.rss_hf = ETH_RSS_L2TPV3;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
 	else if (!strcmp(res->value, "esp"))
-		rss_conf.rss_hf = ETH_RSS_ESP;
+		rss_conf.rss_hf = RTE_ETH_RSS_ESP;
 	else if (!strcmp(res->value, "ah"))
-		rss_conf.rss_hf = ETH_RSS_AH;
+		rss_conf.rss_hf = RTE_ETH_RSS_AH;
 	else if (!strcmp(res->value, "pfcp"))
-		rss_conf.rss_hf = ETH_RSS_PFCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
 	else if (!strcmp(res->value, "pppoe"))
-		rss_conf.rss_hf = ETH_RSS_PPPOE;
+		rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
 	else if (!strcmp(res->value, "gtpu"))
-		rss_conf.rss_hf = ETH_RSS_GTPU;
+		rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
 	else if (!strcmp(res->value, "ecpri"))
-		rss_conf.rss_hf = ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "mpls"))
-		rss_conf.rss_hf = ETH_RSS_MPLS;
+		rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
 	else if (!strcmp(res->value, "ipv4-chksum"))
-		rss_conf.rss_hf = ETH_RSS_IPV4_CHKSUM;
+		rss_conf.rss_hf = RTE_ETH_RSS_IPV4_CHKSUM;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "level-default")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
 	} else if (!strcmp(res->value, "level-outer")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
 	} else if (!strcmp(res->value, "level-inner")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
 	} else if (!strcmp(res->value, "default"))
 		use_default = 1;
 	else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@@ -2982,8 +2982,8 @@ parse_reta_config(const char *str,
 			return -1;
 		}
 
-		idx = hash_index / RTE_RETA_GROUP_SIZE;
-		shift = hash_index % RTE_RETA_GROUP_SIZE;
+		idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
+		shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[idx].mask |= (1ULL << shift);
 		reta_conf[idx].reta[shift] = nb_queue;
 	}
@@ -3012,10 +3012,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
 	} else
 		printf("The reta size of port %d is %u\n",
 			res->port_id, dev_info.reta_size);
-	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+	if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		fprintf(stderr,
 			"Currently do not support more than %u entries of redirection table\n",
-			ETH_RSS_RETA_SIZE_512);
+			RTE_ETH_RSS_RETA_SIZE_512);
 		return;
 	}
 
@@ -3086,8 +3086,8 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
 	char *end;
 	char *str_fld[8];
 	uint16_t i;
-	uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 	int ret;
 
 	p = strchr(p0, '(');
@@ -3132,7 +3132,7 @@ cmd_showport_reta_parsed(void *parsed_result,
 	if (ret != 0)
 		return;
 
-	max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+	max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
 	if (res->size == 0 || res->size > max_reta_size) {
 		fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
 			res->size, max_reta_size);
@@ -3272,7 +3272,7 @@ cmd_config_dcb_parsed(void *parsed_result,
 		return;
 	}
 
-	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+	if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
 		fprintf(stderr,
 			"The invalid number of traffic class, only 4 or 8 allowed.\n");
 		return;
@@ -4276,9 +4276,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
 	enum rte_vlan_type vlan_type;
 
 	if (!strcmp(res->vlan_type, "inner"))
-		vlan_type = ETH_VLAN_TYPE_INNER;
+		vlan_type = RTE_ETH_VLAN_TYPE_INNER;
 	else if (!strcmp(res->vlan_type, "outer"))
-		vlan_type = ETH_VLAN_TYPE_OUTER;
+		vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
 	else {
 		fprintf(stderr, "Unknown vlan type\n");
 		return;
@@ -4615,55 +4615,55 @@ csum_show(int port_id)
 	printf("Parse tunnel is %s\n",
 		(ports[port_id].parse_tunnel) ? "on" : "off");
 	printf("IP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
 	printf("UDP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
 	printf("TCP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
 	printf("SCTP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
 	printf("Outer-Ip checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
 	printf("Outer-Udp checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
 
 	/* display warnings if configuration is not supported by the NIC */
 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
 	if (ret != 0)
 		return;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware UDP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware TCP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 			== 0) {
 		fprintf(stderr,
 			"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@@ -4713,8 +4713,8 @@ cmd_csum_parsed(void *parsed_result,
 
 		if (!strcmp(res->proto, "ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_IPV4_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"IP checksum offload is not supported by port %u\n",
@@ -4722,8 +4722,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_UDP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"UDP checksum offload is not supported by port %u\n",
@@ -4731,8 +4731,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "tcp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_TCP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"TCP checksum offload is not supported by port %u\n",
@@ -4740,8 +4740,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "sctp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_SCTP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"SCTP checksum offload is not supported by port %u\n",
@@ -4749,9 +4749,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer IP checksum offload is not supported by port %u\n",
@@ -4759,9 +4759,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer UDP checksum offload is not supported by port %u\n",
@@ -4916,7 +4916,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr, "Error: TSO is not supported by port %d\n",
 			res->port_id);
 		return;
@@ -4924,11 +4924,11 @@ cmd_tso_set_parsed(void *parsed_result,
 
 	if (ports[res->port_id].tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_TCP_TSO;
+						~RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO for non-tunneled packets is disabled\n");
 	} else {
 		ports[res->port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_TCP_TSO;
+						RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO segment size for non-tunneled packets is %d\n",
 			ports[res->port_id].tso_segsz);
 	}
@@ -4940,7 +4940,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr,
 			"Warning: TSO enabled but not supported by port %d\n",
 			res->port_id);
@@ -5011,27 +5011,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
 	if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
 		return dev_info;
 
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
 		fprintf(stderr,
 			"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
@@ -5059,20 +5059,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 	dev_info = check_tunnel_tso_nic_support(res->port_id);
 	if (ports[res->port_id].tunnel_tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-			~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IP_TNL_TSO |
-			  DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 		printf("TSO for tunneled packets is disabled\n");
 	} else {
-		uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-					 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IP_TNL_TSO |
-					 DEV_TX_OFFLOAD_UDP_TNL_TSO);
+		uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 
 		ports[res->port_id].dev_conf.txmode.offloads |=
 			(tso_offloads & dev_info.tx_offload_capa);
@@ -5095,7 +5095,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 			fprintf(stderr,
 				"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
 		if (!(ports[res->port_id].dev_conf.txmode.offloads &
-		      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+		      RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 			fprintf(stderr,
 				"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
 	}
@@ -7227,9 +7227,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
 		return;
 	}
 
-	if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		rx_fc_en = true;
-	if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		tx_fc_en = true;
 
 	printf("\n%s Flow control infos for port %-2d %s\n",
@@ -7507,12 +7507,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
-			{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE},
+			{RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	/* Partial command line, retrieve current configuration */
@@ -7525,11 +7525,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 			return;
 		}
 
-		if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			rx_fc_en = 1;
-		if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			tx_fc_en = 1;
 	}
 
@@ -7597,12 +7597,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
-		{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+		{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE},
+		{RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@@ -9250,13 +9250,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
 	int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
 	if (!strcmp(res->what,"rxmode")) {
 		if (!strcmp(res->mode, "AUPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
 		else if (!strcmp(res->mode, "ROPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
 		else if (!strcmp(res->mode, "BAM"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
 		else if (!strncmp(res->mode, "MPE",3))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 	}
 
 	RTE_SET_USED(is_on);
@@ -9656,7 +9656,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
 	int ret;
 
 	tunnel_udp.udp_port = res->udp_port;
-	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+	tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 
 	if (!strcmp(res->what, "add"))
 		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9722,13 +9722,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
 	tunnel_udp.udp_port = res->udp_port;
 
 	if (!strcmp(res->tunnel_type, "vxlan")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 	} else if (!strcmp(res->tunnel_type, "geneve")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
 	} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
 	} else if (!strcmp(res->tunnel_type, "ecpri")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
 	} else {
 		fprintf(stderr, "Invalid tunnel type\n");
 		return;
@@ -11859,7 +11859,7 @@ cmd_set_macsec_offload_on_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
 #endif
@@ -11870,7 +11870,7 @@ cmd_set_macsec_offload_on_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MACSEC_INSERT;
+						RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
@@ -11956,7 +11956,7 @@ cmd_set_macsec_offload_off_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_disable(port_id);
 #endif
@@ -11964,7 +11964,7 @@ cmd_set_macsec_offload_off_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_MACSEC_INSERT;
+						~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
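
For reference, a minimal sketch of how an application drives link flow
control through the renamed enums; it reuses the same Rx/Tx on-off mapping
as the rx_tx_onoff_2_lfc_mode table above, and port_id, rx_on and tx_on are
placeholder inputs:

#include <rte_ethdev.h>

static int
set_link_fc(uint16_t port_id, int rx_on, int tx_on)
{
	/* [rx_on][tx_on] -> flow control mode, as in testpmd above */
	static const enum rte_eth_fc_mode mode[2][2] = {
		{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE},
		{RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
	};
	struct rte_eth_fc_conf fc_conf;
	int ret;

	/* start from the current configuration, then set only the mode */
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;
	fc_conf.mode = mode[!!rx_on][!!tx_on];
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
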
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index bdcd826490d1..47ff307e39c0 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -86,62 +86,62 @@ static const struct {
 };
 
 const struct rss_type_info rss_type_table[] = {
-	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
-		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
-		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
-		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
 	{ "none", 0 },
-	{ "eth", ETH_RSS_ETH },
-	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
-	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
-	{ "vlan", ETH_RSS_VLAN },
-	{ "s-vlan", ETH_RSS_S_VLAN },
-	{ "c-vlan", ETH_RSS_C_VLAN },
-	{ "ipv4", ETH_RSS_IPV4 },
-	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
-	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
-	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
-	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
-	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
-	{ "ipv6", ETH_RSS_IPV6 },
-	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
-	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
-	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
-	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
-	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
-	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
-	{ "ipv6-ex", ETH_RSS_IPV6_EX },
-	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
-	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
-	{ "port", ETH_RSS_PORT },
-	{ "vxlan", ETH_RSS_VXLAN },
-	{ "geneve", ETH_RSS_GENEVE },
-	{ "nvgre", ETH_RSS_NVGRE },
-	{ "ip", ETH_RSS_IP },
-	{ "udp", ETH_RSS_UDP },
-	{ "tcp", ETH_RSS_TCP },
-	{ "sctp", ETH_RSS_SCTP },
-	{ "tunnel", ETH_RSS_TUNNEL },
+	{ "eth", RTE_ETH_RSS_ETH },
+	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+	{ "vlan", RTE_ETH_RSS_VLAN },
+	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
+	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
+	{ "ipv4", RTE_ETH_RSS_IPV4 },
+	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", RTE_ETH_RSS_IPV6 },
+	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+	{ "port", RTE_ETH_RSS_PORT },
+	{ "vxlan", RTE_ETH_RSS_VXLAN },
+	{ "geneve", RTE_ETH_RSS_GENEVE },
+	{ "nvgre", RTE_ETH_RSS_NVGRE },
+	{ "ip", RTE_ETH_RSS_IP },
+	{ "udp", RTE_ETH_RSS_UDP },
+	{ "tcp", RTE_ETH_RSS_TCP },
+	{ "sctp", RTE_ETH_RSS_SCTP },
+	{ "tunnel", RTE_ETH_RSS_TUNNEL },
 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
-	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
-	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
-	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
-	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
-	{ "esp", ETH_RSS_ESP },
-	{ "ah", ETH_RSS_AH },
-	{ "l2tpv3", ETH_RSS_L2TPV3 },
-	{ "pfcp", ETH_RSS_PFCP },
-	{ "pppoe", ETH_RSS_PPPOE },
-	{ "gtpu", ETH_RSS_GTPU },
-	{ "ecpri", ETH_RSS_ECPRI },
-	{ "mpls", ETH_RSS_MPLS },
-	{ "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
-	{ "l4-chksum", ETH_RSS_L4_CHKSUM },
+	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+	{ "esp", RTE_ETH_RSS_ESP },
+	{ "ah", RTE_ETH_RSS_AH },
+	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+	{ "pfcp", RTE_ETH_RSS_PFCP },
+	{ "pppoe", RTE_ETH_RSS_PPPOE },
+	{ "gtpu", RTE_ETH_RSS_GTPU },
+	{ "ecpri", RTE_ETH_RSS_ECPRI },
+	{ "mpls", RTE_ETH_RSS_MPLS },
+	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
 	{ NULL, 0 },
 };
 
@@ -538,39 +538,39 @@ static void
 device_infos_display_speeds(uint32_t speed_capa)
 {
 	printf("\n\tDevice speed capability:");
-	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
 		printf(" Autonegotiate (all speeds)");
-	if (speed_capa & ETH_LINK_SPEED_FIXED)
+	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
 		printf(" Disable autonegotiate (fixed speed)  ");
-	if (speed_capa & ETH_LINK_SPEED_10M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
 		printf(" 10 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_10M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
 		printf(" 10 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
 		printf(" 100 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
 		printf(" 100 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_1G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
 		printf(" 1 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_2_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
 		printf(" 2.5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
 		printf(" 5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_10G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
 		printf(" 10 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_20G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
 		printf(" 20 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_25G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
 		printf(" 25 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_40G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
 		printf(" 40 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_50G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
 		printf(" 50 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_56G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
 		printf(" 56 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_100G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
 		printf(" 100 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_200G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
 		printf(" 200 Gbps  ");
 }
 
@@ -700,9 +700,9 @@ port_infos_display(portid_t port_id)
 
 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
-	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 	       ("full-duplex") : ("half-duplex"));
-	printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 	       ("On") : ("Off"));
 
 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
@@ -720,22 +720,22 @@ port_infos_display(portid_t port_id)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 	if (vlan_offload >= 0){
 		printf("VLAN offload: \n");
-		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
 			printf("  strip on, ");
 		else
 			printf("  strip off, ");
 
-		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
 			printf("filter on, ");
 		else
 			printf("filter off, ");
 
-		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
 			printf("extend on, ");
 		else
 			printf("extend off, ");
 
-		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
 			printf("qinq strip on\n");
 		else
 			printf("qinq strip off\n");
@@ -2904,8 +2904,8 @@ port_rss_reta_info(portid_t port_id,
 	}
 
 	for (i = 0; i < nb_entries; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
@@ -3273,7 +3273,7 @@ dcb_fwd_config_setup(void)
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
 		fwd_lcores[lc_id]->stream_nb = 0;
 		fwd_lcores[lc_id]->stream_idx = sm_id;
-		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
 			/* if the nb_queue is zero, means this tc is
 			 * not enabled on the POOL
 			 */
@@ -4336,11 +4336,11 @@ vlan_extend_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	} else {
-		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4366,11 +4366,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4411,11 +4411,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	} else {
-		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4441,11 +4441,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	} else {
-		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4515,7 +4515,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 		return;
 
 	if (ports[port_id].dev_conf.txmode.offloads &
-	    DEV_TX_OFFLOAD_QINQ_INSERT) {
+	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
 		return;
 	}
@@ -4524,7 +4524,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: vlan insert is not supported by port %d\n",
 			port_id);
@@ -4532,7 +4532,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	ports[port_id].tx_vlan_id = vlan_id;
 }
 
@@ -4551,7 +4551,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: qinq insert not supported by port %d\n",
 			port_id);
@@ -4559,8 +4559,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
-						    DEV_TX_OFFLOAD_QINQ_INSERT);
+	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = vlan_id;
 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
 }
@@ -4569,8 +4569,8 @@ void
 tx_vlan_reset(portid_t port_id)
 {
 	ports[port_id].dev_conf.txmode.offloads &=
-				~(DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_QINQ_INSERT);
+				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = 0;
 	ports[port_id].tx_vlan_id_outer = 0;
 }
@@ -4976,7 +4976,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
 	ret = eth_link_get_nowait_print_err(port_id, &link);
 	if (ret < 0)
 		return 1;
-	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
 	    rate > link.link_speed) {
 		fprintf(stderr,
 			"Invalid rate value:%u bigger than link speed: %u\n",
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 090797318a35..75b24487e72e 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
-			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 				ol_flags |= PKT_TX_IP_CKSUM;
 			} else {
 				ipv4_hdr->hdr_checksum = 0;
@@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
-			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 				ol_flags |= PKT_TX_UDP_CKSUM;
 			} else {
 				udp_hdr->dgram_cksum = 0;
@@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		if (tso_segsz)
 			ol_flags |= PKT_TX_TCP_SEG;
-		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 			ol_flags |= PKT_TX_TCP_CKSUM;
 		} else {
 			tcp_hdr->cksum = 0;
@@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 			((char *)l3_hdr + info->l3_len);
 		/* sctp payload must be a multiple of 4 to be
 		 * offloaded */
-		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
 			((ipv4_hdr->total_length & 0x3) == 0)) {
 			ol_flags |= PKT_TX_SCTP_CKSUM;
 		} else {
@@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ipv4_hdr->hdr_checksum = 0;
 		ol_flags |= PKT_TX_OUTER_IPV4;
 
-		if (tx_offloads	& DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
 		else
 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ol_flags |= PKT_TX_TCP_SEG;
 
 	/* Skip SW outer UDP checksum generation if HW supports it */
-	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
 		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
 			udp_hdr->dgram_cksum
 				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				m->outer_l2_len = info.outer_l2_len;
 				m->outer_l3_len = info.outer_l3_len;
 				m->l2_len = info.l2_len;
@@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					rte_be_to_cpu_16(info.outer_ethertype),
 					info.outer_l3_len);
 			/* dump tx packet info */
-			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					    DEV_TX_OFFLOAD_UDP_CKSUM |
-					    DEV_TX_OFFLOAD_TCP_CKSUM |
-					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
 				info.tso_segsz != 0)
 				printf("tx: m->l2_len=%d m->l3_len=%d "
 					"m->l4_len=%d\n",
 					m->l2_len, m->l3_len, m->l4_len);
 			if (info.is_tunnel == 1) {
 				if ((tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 				    (tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
 				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
 					printf("tx: m->outer_l2_len=%d "
 						"m->outer_l3_len=%d\n",
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 7ebed9fed334..03d026dec169 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -99,11 +99,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
 
 	tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags |= PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads	& DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index ee76df7f0323..57e00bca20e7 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 	fs->rx_packets += nb_rx;
 	txp = &ports[fs->tx_port];
 	tx_offloads = txp->dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
diff --git a/app/test-pmd/macswap_common.h b/app/test-pmd/macswap_common.h
index 7e9a3590a436..7ade9a686b7c 100644
--- a/app/test-pmd/macswap_common.h
+++ b/app/test-pmd/macswap_common.h
@@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
 {
 	uint64_t ol_flags = 0;
 
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
 			PKT_TX_VLAN : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
 			PKT_TX_QINQ : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
 			PKT_TX_MACSEC : 0;
 
 	return ol_flags;
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index ab8e8f7e694a..693e77eff2c0 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -546,29 +546,29 @@ parse_xstats_list(const char *in_str, struct rte_eth_xstat_name **xstats,
 static int
 parse_link_speed(int n)
 {
-	uint32_t speed = ETH_LINK_SPEED_FIXED;
+	uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
 
 	switch (n) {
 	case 1000:
-		speed |= ETH_LINK_SPEED_1G;
+		speed |= RTE_ETH_LINK_SPEED_1G;
 		break;
 	case 10000:
-		speed |= ETH_LINK_SPEED_10G;
+		speed |= RTE_ETH_LINK_SPEED_10G;
 		break;
 	case 25000:
-		speed |= ETH_LINK_SPEED_25G;
+		speed |= RTE_ETH_LINK_SPEED_25G;
 		break;
 	case 40000:
-		speed |= ETH_LINK_SPEED_40G;
+		speed |= RTE_ETH_LINK_SPEED_40G;
 		break;
 	case 50000:
-		speed |= ETH_LINK_SPEED_50G;
+		speed |= RTE_ETH_LINK_SPEED_50G;
 		break;
 	case 100000:
-		speed |= ETH_LINK_SPEED_100G;
+		speed |= RTE_ETH_LINK_SPEED_100G;
 		break;
 	case 200000:
-		speed |= ETH_LINK_SPEED_200G;
+		speed |= RTE_ETH_LINK_SPEED_200G;
 		break;
 	case 100:
 	case 10:
@@ -1000,13 +1000,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
 				if (!strcmp(optarg, "64K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_64K;
+						RTE_ETH_FDIR_PBALLOC_64K;
 				else if (!strcmp(optarg, "128K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_128K;
+						RTE_ETH_FDIR_PBALLOC_128K;
 				else if (!strcmp(optarg, "256K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_256K;
+						RTE_ETH_FDIR_PBALLOC_256K;
 				else
 					rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
 						 " must be: 64K or 128K or 256K\n",
@@ -1048,34 +1048,34 @@ launch_args_parse(int argc, char** argv)
 			}
 #endif
 			if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 			if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
-				rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 			if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
-				rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 			if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
-				rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-rx-timestamp"))
-				rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 			if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-filter"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-extend"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-qinq-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
 				rx_drop_en = 1;
@@ -1097,13 +1097,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
 				set_pkt_forwarding_mode(optarg);
 			if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
-				rss_hf = ETH_RSS_IP;
+				rss_hf = RTE_ETH_RSS_IP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
-				rss_hf = ETH_RSS_UDP;
+				rss_hf = RTE_ETH_RSS_UDP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
-				rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
-				rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rxq")) {
 				n = atoi(optarg);
 				if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@@ -1482,12 +1482,12 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
 				char *end = NULL;
 				n = strtoul(optarg, &end, 16);
-				if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+				if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
 					rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
 				else
 					rte_exit(EXIT_FAILURE,
 						 "rx-mq-mode must be >= 0 and <= %d\n",
-						 ETH_MQ_RX_VMDQ_DCB_RSS);
+						 RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
 			}
 			if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
 				record_core_cycles = 1;
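
As the parse_link_speed() hunk shows, a fixed link speed is requested by
OR-ing RTE_ETH_LINK_SPEED_FIXED with exactly one speed flag, while leaving
link_speeds at RTE_ETH_LINK_SPEED_AUTONEG (zero) keeps autonegotiation.
A sketch pinning a port at 10 Gbps, with port_conf then passed to
rte_eth_dev_configure():

struct rte_eth_conf port_conf = {
	.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G,
};
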
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index de7a8c295527..df7d16fee71e 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -348,7 +348,7 @@ uint64_t noisy_lkup_num_reads_writes;
 /*
  * Receive Side Scaling (RSS) configuration.
  */
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
 
 /*
  * Port topology configuration
@@ -459,12 +459,12 @@ lcoreid_t latencystats_lcore_id = -1;
 struct rte_eth_rxmode rx_mode;
 
 struct rte_eth_txmode tx_mode = {
-	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 };
 
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
 	.mode = RTE_FDIR_MODE_NONE,
-	.pballoc = RTE_FDIR_PBALLOC_64K,
+	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 	.status = RTE_FDIR_REPORT_STATUS,
 	.mask = {
 		.vlan_tci_mask = 0xFFEF,
@@ -518,7 +518,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 /*
  * hexadecimal bitmask of RX mq mode can be enabled.
  */
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
 
 /*
  * Used to set forced link speed
@@ -1572,9 +1572,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
 	if (ret != 0)
 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
 
-	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		port->dev_conf.txmode.offloads &=
-			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Apply Rx offloads configuration */
 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
@@ -1711,8 +1711,8 @@ init_config(void)
 
 	init_port_config();
 
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
 	/*
 	 * Records which Mbuf pool to use by each logical core, if needed.
 	 */
@@ -3456,7 +3456,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3750,17 +3750,17 @@ init_port_config(void)
 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
 				port->dev_conf.rxmode.mq_mode =
 					(enum rte_eth_rx_mq_mode)
-						(rx_mq_mode & ETH_MQ_RX_RSS);
+						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
 			} else {
-				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 				port->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 				for (i = 0;
 				     i < port->dev_info.nb_rx_queues;
 				     i++)
 					port->rx_conf[i].offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 			}
 		}
 
@@ -3848,9 +3848,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		vmdq_rx_conf->enable_default_pool = 0;
 		vmdq_rx_conf->default_pool = 0;
 		vmdq_rx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 		vmdq_tx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 
 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3858,7 +3858,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 			vmdq_rx_conf->pool_map[i].pools =
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
@@ -3866,8 +3866,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3883,23 +3883,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
 	}
 
 	if (pfc_en)
 		eth_conf->dcb_capability_en =
-				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
 	else
-		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
 
 	return 0;
 }
@@ -3928,7 +3928,7 @@ init_port_dcb_config(portid_t pid,
 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	/* re-configure the device . */
 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@@ -3978,7 +3978,7 @@ init_port_dcb_config(portid_t pid,
 
 	rxtx_port_config(rte_port);
 	/* VLAN filter */
-	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index dd8f27a296b6..697f1bf8cac6 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -465,7 +465,7 @@ extern lcoreid_t bitrate_lcore_id;
 extern uint8_t bitrate_enabled;
 #endif
 
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
 
 extern uint32_t max_rx_pkt_len;
 
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e45f8840c91c..9eb7992815e8 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -354,11 +354,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	tx_offloads = txp->dev_conf.txmode.offloads;
 	vlan_tci = txp->tx_vlan_id;
 	vlan_tci_outer = txp->tx_vlan_id_outer;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	/*
diff --git a/app/test/test_ethdev_link.c b/app/test/test_ethdev_link.c
index ee11987bae28..6248aea49abd 100644
--- a/app/test/test_ethdev_link.c
+++ b/app/test/test_ethdev_link.c
@@ -14,10 +14,10 @@ test_link_status_up_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -27,9 +27,9 @@ test_link_status_up_default(void)
 	TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
 		text, strlen(text), "Invalid default link status string");
 
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_FIXED;
-	link_status.link_speed = ETH_SPEED_NUM_10M,
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #2: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -37,7 +37,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -45,7 +45,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_NONE;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -54,9 +54,9 @@ test_link_status_up_default(void)
 		"string with HDX");
 
 	/* test max str len */
-	link_status.link_speed = ETH_SPEED_NUM_200G;
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_AUTONEG;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #4:len = %d, %s\n", ret, text);
 	RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@@ -69,10 +69,10 @@ test_link_status_down_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -90,9 +90,9 @@ test_link_status_invalid(void)
 	int ret = 0;
 	struct rte_eth_link link_status = {
 		.link_speed = 55555,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -116,21 +116,21 @@ test_link_speed_all_values(void)
 		const char *value;
 		uint32_t link_speed;
 	} speed_str_map[] = {
-		{ "None",   ETH_SPEED_NUM_NONE },
-		{ "10 Mbps",  ETH_SPEED_NUM_10M },
-		{ "100 Mbps", ETH_SPEED_NUM_100M },
-		{ "1 Gbps",   ETH_SPEED_NUM_1G },
-		{ "2.5 Gbps", ETH_SPEED_NUM_2_5G },
-		{ "5 Gbps",   ETH_SPEED_NUM_5G },
-		{ "10 Gbps",  ETH_SPEED_NUM_10G },
-		{ "20 Gbps",  ETH_SPEED_NUM_20G },
-		{ "25 Gbps",  ETH_SPEED_NUM_25G },
-		{ "40 Gbps",  ETH_SPEED_NUM_40G },
-		{ "50 Gbps",  ETH_SPEED_NUM_50G },
-		{ "56 Gbps",  ETH_SPEED_NUM_56G },
-		{ "100 Gbps", ETH_SPEED_NUM_100G },
-		{ "200 Gbps", ETH_SPEED_NUM_200G },
-		{ "Unknown",  ETH_SPEED_NUM_UNKNOWN },
+		{ "None",   RTE_ETH_SPEED_NUM_NONE },
+		{ "10 Mbps",  RTE_ETH_SPEED_NUM_10M },
+		{ "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+		{ "1 Gbps",   RTE_ETH_SPEED_NUM_1G },
+		{ "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+		{ "5 Gbps",   RTE_ETH_SPEED_NUM_5G },
+		{ "10 Gbps",  RTE_ETH_SPEED_NUM_10G },
+		{ "20 Gbps",  RTE_ETH_SPEED_NUM_20G },
+		{ "25 Gbps",  RTE_ETH_SPEED_NUM_25G },
+		{ "40 Gbps",  RTE_ETH_SPEED_NUM_40G },
+		{ "50 Gbps",  RTE_ETH_SPEED_NUM_50G },
+		{ "56 Gbps",  RTE_ETH_SPEED_NUM_56G },
+		{ "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+		{ "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+		{ "Unknown",  RTE_ETH_SPEED_NUM_UNKNOWN },
 		{ "Invalid",   50505 }
 	};
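
A usage sketch for the formatting API this test exercises, using the
renamed constants; the expected output follows the test strings above:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_link_example(void)
{
	const struct rte_eth_link link = {
		.link_speed = RTE_ETH_SPEED_NUM_25G,
		.link_status = RTE_ETH_LINK_UP,
		.link_autoneg = RTE_ETH_LINK_AUTONEG,
		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	};
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_to_str(text, sizeof(text), &link) > 0)
		printf("%s\n", text);	/* "Link up at 25 Gbps FDX Autoneg" */
}
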
 
diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index add4d8a67821..a09253e91814 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -103,7 +103,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 		.intr_conf = {
 			.rxq = 1,
@@ -118,7 +118,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 	};
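
The namespaced enums cover the common single-call setup too; a minimal
sketch along the lines of port_init() above (one queue each way, with the
helper name and error handling being placeholders):

static int
port_setup(uint16_t port_id)
{
	static const struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = { .rss_hf = RTE_ETH_RSS_IP },
		},
		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
	};

	/* one Rx and one Tx queue; on real hardware, mask rss_hf against
	 * dev_info.flow_type_rss_offloads first */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
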
 
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index 96733554b6c4..40ab0d5c4ca4 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
 
 static const struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 5388d18125a6..8a9ef851789f 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -134,11 +134,11 @@ static uint16_t vlan_id = 0x100;
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 189d2430f27e..351129de2f9b 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -107,11 +107,11 @@ static struct link_bonding_unittest_params test_params  = {
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index e7bb0497b663..f9eae9397386 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -52,7 +52,7 @@ struct slave_conf {
 
 	struct rte_eth_rss_conf rss_conf;
 	uint8_t rss_key[40];
-	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t is_slave;
 	struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
@@ -61,7 +61,7 @@ struct slave_conf {
 struct link_bonding_rssconf_unittest_params {
 	uint8_t bond_port_id;
 	struct rte_eth_dev_info bond_dev_info;
-	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 	struct slave_conf slave_ports[SLAVE_COUNT];
 
 	struct rte_mempool *mbuf_pool;
@@ -80,27 +80,27 @@ static struct link_bonding_rssconf_unittest_params test_params  = {
  */
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
 static struct rte_eth_conf rss_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IPV6,
+			.rss_hf = RTE_ETH_RSS_IPV6,
 		},
 	},
 	.lpbk_mode = 0,
@@ -207,13 +207,13 @@ bond_slaves(void)
 static int
 reta_set(uint16_t port_id, uint8_t value, int reta_size)
 {
-	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
 	int i, j;
 
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
 		/* select all fields to set */
 		reta_conf[i].mask = ~0LL;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			reta_conf[i].reta[j] = value;
 	}
 
@@ -232,8 +232,8 @@ reta_check_synced(struct slave_conf *port)
 	for (i = 0; i < test_params.bond_dev_info.reta_size;
 			i++) {
 
-		int index = i / RTE_RETA_GROUP_SIZE;
-		int shift = i % RTE_RETA_GROUP_SIZE;
+		int index = i / RTE_ETH_RETA_GROUP_SIZE;
+		int shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (port->reta_conf[index].reta[shift] !=
 				test_params.bond_reta_conf[index].reta[shift])
@@ -251,7 +251,7 @@ static int
 bond_reta_fetch(void) {
 	unsigned j;
 
-	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
+	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
 			j++)
 		test_params.bond_reta_conf[j].mask = ~0LL;
 
@@ -268,7 +268,7 @@ static int
 slave_reta_fetch(struct slave_conf *port) {
 	unsigned j;
 
-	for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
+	for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
 		port->reta_conf[j].mask = ~0LL;
 
 	TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index a3b4f52c65e6..1df86ce080e5 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -62,11 +62,11 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 1,  /* enable loopback */
 };
@@ -155,7 +155,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -822,7 +822,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		/* bulk alloc rx, full-featured tx */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "hybrid")) {
 		/* bulk alloc rx, vector tx
@@ -831,13 +831,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		 */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "full")) {
 		/* full feature rx,tx pair */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		return 0;
 	}
 
diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7e15b47eb0fb..d9f2e4f66bde 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -53,7 +53,7 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
 		rte_pktmbuf_free(pkt);
@@ -168,7 +168,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
 		int wait_to_complete __rte_unused)
 {
 	if (!bonded_eth_dev->data->dev_started)
-		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -562,9 +562,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 	eth_dev->data->nb_rx_queues = (uint16_t)1;
 	eth_dev->data->nb_tx_queues = (uint16_t)1;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL)
diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 53560d3830d7..1c0ea988f239 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue configuration.
 - HW managed event vectorization on CN10K for packets enqueued from ethdev to
diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index 11fbebfcd243..0fa57abfa3e0 100644
--- a/doc/guides/eventdevs/octeontx2.rst
+++ b/doc/guides/eventdevs/octeontx2.rst
@@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue config.
 
diff --git a/doc/guides/nics/af_packet.rst b/doc/guides/nics/af_packet.rst
index bdd6e7263c85..54feffdef4bd 100644
--- a/doc/guides/nics/af_packet.rst
+++ b/doc/guides/nics/af_packet.rst
@@ -70,5 +70,5 @@ Features and Limitations
 ------------------------
 
 The PMD will re-insert the VLAN tag transparently to the packet if the kernel
-strips it, as long as the ``DEV_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
+strips it, as long as the ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
 application.
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index aa6032889a55..b3d10f30dc77 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -877,21 +877,21 @@ processing. This improved performance is derived from a number of optimizations:
     * TX: only the following reduced set of transmit offloads is supported in
       vector mode::
 
-       DEV_TX_OFFLOAD_MBUF_FAST_FREE
+       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 
     * RX: only the following reduced set of receive offloads is supported in
       vector mode (note that jumbo MTU is allowed only when the MTU setting
-      does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
-       DEV_RX_OFFLOAD_VLAN_STRIP
-       DEV_RX_OFFLOAD_KEEP_CRC
-       DEV_RX_OFFLOAD_IPV4_CKSUM
-       DEV_RX_OFFLOAD_UDP_CKSUM
-       DEV_RX_OFFLOAD_TCP_CKSUM
-       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-       DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-       DEV_RX_OFFLOAD_RSS_HASH
-       DEV_RX_OFFLOAD_VLAN_FILTER
+      does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+       RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+       RTE_ETH_RX_OFFLOAD_KEEP_CRC
+       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_RSS_HASH
+       RTE_ETH_RX_OFFLOAD_VLAN_FILTER
 
 The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
 vector processing is made at run-time when the port is started; if no transmit
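
For illustration, an application that wants to stay on the vector path would
request offloads only from the reduced sets above; a sketch (``port_id`` and
the queue counts are placeholders, error handling elided)::

    struct rte_eth_conf conf = {
        .rxmode = {
            .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                        RTE_ETH_RX_OFFLOAD_RSS_HASH,
        },
        .txmode = {
            .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
        },
    };

    /* any offload outside the reduced sets selects the scalar datapath */
    rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
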
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 91bdcd065a95..0209730b904a 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -432,7 +432,7 @@ Limitations
 .. code-block:: console
 
      vlan_offload = rte_eth_dev_get_vlan_offload(port);
-     vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+     vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
 Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index 8dd421ca013b..b48d9dcb9591 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -30,7 +30,7 @@ Speed capabilities
 
 Supports getting the speed capabilities that the current device is capable of.
 
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
 * **[related]  API**: ``rte_eth_dev_info_get()``.
 
 
@@ -101,11 +101,11 @@ Supports Rx interrupts.
 Lock-free Tx queue
 ------------------
 
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises the RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capability, multiple threads can
 invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
 
-* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
 * **[related]  API**: ``rte_eth_tx_burst()``.
 
 
@@ -117,8 +117,8 @@ Fast mbuf free
 Supports optimization for fast release of mbufs following successful Tx.
 Requires that, per queue, all mbufs come from the same mempool and have refcnt = 1.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
 
 
 .. _nic_features_free_tx_mbuf_on_demand:
@@ -177,7 +177,7 @@ Scattered Rx
 
 Supports receiving segmented mbufs.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
 * **[implements] datapath**: ``Scattered Rx function``.
 * **[implements] rte_eth_dev_data**: ``scattered_rx``.
 * **[provides]   eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -205,12 +205,12 @@ LRO
 
 Supports Large Receive Offload.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
   ``dev_conf.rxmode.max_lro_pkt_size``.
 * **[implements] datapath**: ``LRO functionality``.
 * **[implements] rte_eth_dev_data**: ``lro``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
 * **[provides]   rte_eth_dev_info**: ``max_lro_pkt_size``.
 
 
@@ -221,12 +221,12 @@ TSO
 
 Supports TCP Segmentation Offloading.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
 * **[uses]       rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
 * **[uses]       mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
 * **[implements] datapath**: ``TSO functionality``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
 
 
 .. _nic_features_promiscuous_mode:
@@ -287,9 +287,9 @@ RSS hash
 
 Supports RSS hashing on RX.
 
-* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
 * **[uses]     user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
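
For illustration, a minimal sketch of an application enabling the knobs
listed above (``port_id`` and the queue counts are placeholders)::

    struct rte_eth_conf conf = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_RSS,
            .offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH,
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL, /* let the PMD choose a default key */
                .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
            },
        },
    };

    rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);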
 
@@ -302,7 +302,7 @@ Inner RSS
 Supports RX RSS hashing on Inner headers.
 
 * **[uses]    rte_flow_action_rss**: ``level``.
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
 
@@ -339,7 +339,7 @@ VMDq
 
 Supports Virtual Machine Device Queues (VMDq).
 
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
 * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -362,7 +362,7 @@ DCB
 
 Supports Data Center Bridging (DCB).
 
-* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
 * **[uses]       user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -378,7 +378,7 @@ VLAN filter
 
 Supports filtering of a VLAN Tag identifier.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
 * **[implements] eth_dev_ops**: ``vlan_filter_set``.
 * **[related]    API**: ``rte_eth_dev_vlan_filter()``.
 
@@ -416,13 +416,13 @@ Supports inline crypto processing defined by rte_security library to perform cry
 operations of security protocol while packet is received in NIC. NIC is not aware
 of protocol operations. See Security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@@ -438,14 +438,14 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
 packet is received at NIC. The NIC is capable of understanding the security
 protocol operations. See security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
   ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@@ -459,7 +459,7 @@ CRC offload
 Supports CRC stripping by hardware.
 A PMD is assumed to support CRC stripping by default. A PMD should advertise whether it supports keeping the CRC.
 
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
 
 
 .. _nic_features_vlan_offload:
@@ -469,13 +469,13 @@ VLAN offload
 
 Supports VLAN offload to hardware.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
 * **[implements] eth_dev_ops**: ``vlan_offload_set``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[related]    API**: ``rte_eth_dev_set_vlan_offload()``,
   ``rte_eth_dev_get_vlan_offload()``.
 
@@ -487,14 +487,14 @@ QinQ offload
 
 Supports QinQ (queue in queue) offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
   ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
   ``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 
 
 .. _nic_features_fec:
@@ -508,7 +508,7 @@ information to correct the bit errors generated during data packet transmission
 improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
 
 * **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides]   rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides]   rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
 * **[related]    API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
 
 
@@ -519,16 +519,16 @@ L3 checksum offload
 
 Supports L3 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
 * **[uses]     mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
   ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
   ``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 
 
 .. _nic_features_l4_checksum_offload:
@@ -538,8 +538,8 @@ L4 checksum offload
 
 Supports L4 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
   ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -547,8 +547,8 @@ Supports L4 checksum offload.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
   ``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 
 .. _nic_features_hw_timestamp:
 
@@ -557,10 +557,10 @@ Timestamp offload
 
 Supports Timestamp.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[related] eth_dev_ops**: ``read_clock``.
 
 .. _nic_features_macsec_offload:
@@ -570,11 +570,11 @@ MACsec offload
 
 Supports MACsec.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 
 
 .. _nic_features_inner_l3_checksum:
@@ -584,16 +584,16 @@ Inner L3 checksum
 
 Supports inner packet L3 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 
 
 .. _nic_features_inner_l4_checksum:
@@ -603,15 +603,15 @@ Inner L4 checksum
 
 Supports inner packet L4 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
   ``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 
 
 .. _nic_features_packet_type_parsing:
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index ed6afd62703d..bba53f5a64ee 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
 To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
 will be checked:
 
-*   ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+*   ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
 
-*   ``DEV_RX_OFFLOAD_CHECKSUM``
+*   ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
 
-*   ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+*   ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
 
 *   ``fdir_conf->mode``
 
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 2efdd1a41bb4..a1e236ad75e5 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -216,21 +216,21 @@ For example,
     *   If the max number of VFs (max_vfs) is set in the range of 1 to 32:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
 
         If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
 
     *   If the max number of VFs (max_vfs) is in the range of 33 to 64:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected,
         as ``rxq`` is not valid in this case;
 
-        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are 64 pools in total (RTE_ETH_64_POOLS),
         and each VF has 2 Rx queues;
 
-    On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
-    or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+    On the host, to enable VF RSS functionality, the Rx mq mode should be set to RTE_ETH_MQ_RX_VMDQ_RSS
+    or RTE_ETH_MQ_RX_RSS mode, and SR-IOV mode should be activated (max_vfs >= 1).
     VF RSS information such as the hash function, RSS key and RSS key length also needs to be configured.
 
 .. note::
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 20a74b9b5bcd..148d2f5fc2be 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -89,13 +89,13 @@ Other features are supported using optional MACRO configuration. They include:
 
 To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
 
-*   DEV_RX_OFFLOAD_VLAN_STRIP
+*   RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 
-*   DEV_RX_OFFLOAD_VLAN_EXTEND
+*   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
 
-*   DEV_RX_OFFLOAD_CHECKSUM
+*   RTE_ETH_RX_OFFLOAD_CHECKSUM
 
-*   DEV_RX_OFFLOAD_HEADER_SPLIT
+*   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
 
 *   dev_conf
 
@@ -163,13 +163,13 @@ l3fwd
 ~~~~~
 
 When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
 Otherwise, by default, RX vPMD is disabled.
 
 load_balancer
 ~~~~~~~~~~~~~
 
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
 In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
 
 
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index e4f58c899031..cc1726207f6c 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -371,7 +371,7 @@ Limitations
 
 - CRC:
 
-  - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+  - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
@@ -607,7 +607,7 @@ Driver options
   small-packet traffic.
 
   When MPRQ is enabled, MTU can be larger than the size of
-  user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+  user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. PMD will
   configure a stride size large enough to accommodate the MTU as long as the
   device allows. Note that this can waste system memory compared to enabling Rx
   scatter and multi-segment packet.
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 3ce696b605d1..681010d9ed7d 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -275,7 +275,7 @@ An example utility for eBPF instruction generation in the format of C arrays wil
 be added in next releases
 
 TAP reports on supported RSS functions as part of dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
 **Known limitation:** TAP supports all of the above hash functions together
 and not in partial combinations.
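
Given that limitation, a TAP application would request the whole supported
set at once; a minimal sketch (``port_id`` is a placeholder)::

    struct rte_eth_rss_conf rss_conf = {
        .rss_key = NULL,
        .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP,
    };

    rte_eth_dev_rss_hash_update(port_id, &rss_conf);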
 
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 7bff0aef0b74..9b2c31a2f0bc 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
 
    - the bit mask of required GSO types. The GSO library uses the same macros as
      those that describe a physical device's TX offloading capabilities (i.e.
-     ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+     ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
      wants to segment TCP/IPv4 packets, it should set gso_types to
-     ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
-     supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
-     ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+     ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+     supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+     ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
      allowed.
 
    - a flag, that indicates whether the IPv4 headers of output segments should
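
To make the ``gso_types`` setting concrete, a minimal sketch of a GSO context
under the new names (the mempool pointers are assumed to be created
elsewhere)::

    struct rte_gso_ctx gso_ctx;

    gso_ctx.direct_pool = direct_pool;     /* assumed mbuf pool */
    gso_ctx.indirect_pool = indirect_pool; /* assumed mbuf pool */
    gso_ctx.flag = 0;
    gso_ctx.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO |
                        RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
    gso_ctx.gso_size = 1400; /* max payload size of each output segment */
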
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 2f190b40e43a..dc6186a44ae2 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
     mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM
     set out_ip checksum to 0 in the packet
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
 
 - calculate checksum of out_ip and out_udp::
 
@@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
     set out_ip checksum to 0 in the packet
     set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
-  and DEV_TX_OFFLOAD_UDP_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+  and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
 
 - calculate checksum of in_ip::
 
@@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
 
   This is similar to case 1), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of in_ip and in_tcp::
@@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
   This is similar to case 2), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
-  DEV_TX_OFFLOAD_TCP_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+  RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - segment inner TCP::
@@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header without including the IP
       payload length using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of out_ip, in_ip, in_tcp::
@@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
-  DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+  RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
 
 The list of flags and their precise meaning is described in the mbuf API
 documentation (rte_mbuf.h). Also refer to the testpmd source code
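
As a concrete C rendering of the first case above, using the actual mbuf flag
name ``PKT_TX_IP_CKSUM`` (``m`` and ``ip_hdr`` are assumed to point at the
mbuf and its outer IPv4 header)::

    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    /* HW recomputes the checksum; needs RTE_ETH_TX_OFFLOAD_IPV4_CKSUM */
    ip_hdr->hdr_checksum = 0;
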
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 0d4ac77a7ccf..68312898448c 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
 
 Avoiding lock contention is a key issue in a multi-core environment.
 To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
 In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
 
 To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
 
 Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
 
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
 concurrently on the same Tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
 
 *  Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
@@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
 *  In the eventdev use case, avoid dedicating a separate TX core for transmitting and thus
    enables more scaling as all workers can send the packets.
 
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
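
A sketch of what the capability permits (``keep_running`` and ``build_burst``
are hypothetical; every thread targets the same queue 0)::

    /* Called concurrently from several lcores -- no spinlock needed when
     * the PMD advertises RTE_ETH_TX_OFFLOAD_MT_LOCKFREE. */
    static int
    tx_worker(void *arg)
    {
        uint16_t port_id = *(uint16_t *)arg;
        struct rte_mbuf *burst[32];
        uint16_t n;

        while (keep_running) {
            n = build_burst(burst, 32);             /* hypothetical source */
            rte_eth_tx_burst(port_id, 0, burst, n); /* same queue, no lock */
        }
        return 0;
    }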
 
 Device Identification, Ownership and Configuration
 --------------------------------------------------
@@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
 The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
 Any requested offloading by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
 ``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
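
The resulting check-then-enable pattern, as a sketch (``port_id`` and the
queue counts are placeholders, return values unchecked)::

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf = {0};

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
        conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
    rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
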
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index fa05fe084500..b507396fb4d7 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1943,23 +1943,23 @@ only matching traffic goes through.
 
 .. table:: RSS
 
-   +---------------+---------------------------------------------+
-   | Field         | Value                                       |
-   +===============+=============================================+
-   | ``func``      | RSS hash function to apply                  |
-   +---------------+---------------------------------------------+
-   | ``level``     | encapsulation level for ``types``           |
-   +---------------+---------------------------------------------+
-   | ``types``     | specific RSS hash types (see ``ETH_RSS_*``) |
-   +---------------+---------------------------------------------+
-   | ``key_len``   | hash key length in bytes                    |
-   +---------------+---------------------------------------------+
-   | ``queue_num`` | number of entries in ``queue``              |
-   +---------------+---------------------------------------------+
-   | ``key``       | hash key                                    |
-   +---------------+---------------------------------------------+
-   | ``queue``     | queue indices to use                        |
-   +---------------+---------------------------------------------+
+   +---------------+-------------------------------------------------+
+   | Field         | Value                                           |
+   +===============+=================================================+
+   | ``func``      | RSS hash function to apply                      |
+   +---------------+-------------------------------------------------+
+   | ``level``     | encapsulation level for ``types``               |
+   +---------------+-------------------------------------------------+
+   | ``types``     | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+   +---------------+-------------------------------------------------+
+   | ``key_len``   | hash key length in bytes                        |
+   +---------------+-------------------------------------------------+
+   | ``queue_num`` | number of entries in ``queue``                  |
+   +---------------+-------------------------------------------------+
+   | ``key``       | hash key                                        |
+   +---------------+-------------------------------------------------+
+   | ``queue``     | queue indices to use                            |
+   +---------------+-------------------------------------------------+
 
 Action: ``PF``
 ^^^^^^^^^^^^^^
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index ad92c16868c1..46c9b51d1bf9 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -569,7 +569,7 @@ created by the application is attached to the security session by the API
 
 For Inline Crypto and Inline protocol offload, device specific defined metadata is
 updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
 
 For inline protocol offloaded ingress traffic, the application can register a
 pointer, ``userdata`` , in the security session. When the packet is received,
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 0b4d03fb961f..199c3fa0bd70 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -58,22 +58,16 @@ Deprecation Notices
   ``RTE_ETH_FLOW_MAX`` is one sample of the mentioned case, adding a new flow
   type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
   usage in following public struct hierarchy:
-  ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+  ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
   Need to identify this kind of usages and fix in 20.11, otherwise this blocks
   us extending existing enum/define.
   One solution can be using a fixed size array instead of ``.*MAX.*`` value.
 
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
-  Macros will be added for backward compatibility.
-  Backward compatibility macros will be removed on v22.11.
-  A few old backward compatibility macros from 2013 that does not have
-  proper prefix will be removed on v21.11.
-
 * ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
   and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
   will be removed in DPDK 20.11.
 
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
   This will allow application to enable or disable PMDs from updating
   ``rte_mbuf::hash::fdir``.
   This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index ec2a788789f7..9a50c3281dd2 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -350,6 +350,9 @@ ABI Changes
   to be transparent for both users (no changes in user app is required) and
   PMD developers (no changes in PMD is required).
 
+* ethdev: All enums & macros updated to have ``RTE_ETH`` prefix and structures
+  updated to have ``rte_eth`` prefix. DPDK components updated to use new names.
+
 
 Known Issues
 ------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 78171b25f96e..782574dd39d5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -209,12 +209,12 @@ Where:
     device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
 
 *   ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
     allows user to disable some of the RX HW offload capabilities.
     By default all HW RX offloads are enabled.
 
 *   ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
     allows user to disable some of the TX HW offload capabilities.
     By default all HW TX offloads are enabled.
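
Either MASK is simply the OR of the corresponding flag values; a throwaway
helper to print one (purely illustrative)::

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    int main(void)
    {
        uint64_t mask = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                        RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                        RTE_ETH_RX_OFFLOAD_TCP_CKSUM;

        printf("--rxoffload 0x%" PRIx64 "\n", mask);
        return 0;
    }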
 
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 8ff7ab85369c..2e1446ee461b 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -537,7 +537,7 @@ The command line options are:
     Set the hexadecimal bitmask of RX multi queue mode which can be enabled.
     The default value is 0x7::
 
-       ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+       RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
 
 *   ``--record-core-cycles``
 
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
index be52e6f72dab..a922988607ef 100644
--- a/drivers/bus/dpaa/include/process.h
+++ b/drivers/bus/dpaa/include/process.h
@@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
 struct usdpaa_ioctl_link_status_args_old {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
-	/* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+	/* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
 	int     link_autoneg;
 
 };
@@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
 struct usdpaa_ioctl_update_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_update_link_speed {
 	/* network device node name*/
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
 };
 
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index 65d4bd6edcec..c12400ff5110 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -154,7 +154,7 @@ enum roc_npc_rss_hash_function {
 struct roc_npc_action_rss {
 	enum roc_npc_rss_hash_function func;
 	uint32_t level;
-	uint64_t types;	       /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types;	       /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len;      /**< Hash key length in bytes. */
 	uint32_t queue_num;    /**< Number of entries in @p queue. */
 	const uint8_t *key;    /**< Hash key. */
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index a077376dc0fb..8f778f0c2419 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -93,10 +93,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@@ -290,7 +290,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -320,7 +320,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		internals->tx_queue[i].sockfd = -1;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -331,7 +331,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
 	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	return 0;
 }
 
@@ -346,9 +346,9 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
 	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
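
From the application side, the strip behaviour above is requested per port;
a sketch (``port_id`` is a placeholder)::

    struct rte_eth_conf conf = {
        .rxmode = { .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP },
    };

    /* the PMD's eth_dev_configure() above then sets internals->vlan_strip */
    rte_eth_dev_configure(port_id, 1, 1, &conf);
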
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index b362ccdcd38c..e156246f24df 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -163,10 +163,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -652,7 +652,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -661,7 +661,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
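
Applications read these constants back through the link API; a minimal
sketch (assumes the port is started)::

    struct rte_eth_link link;

    if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
        link.link_status == RTE_ETH_LINK_UP)
        printf("port %u: %u Mbps, %s\n", port_id, link.link_speed,
               link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
               "full-duplex" : "half-duplex");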
 
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 377299b14c7a..b618cba3f023 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
 		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
 
 	/* ARK PMD supports all line rates, how do we indicate that here ?? */
-	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
-				ETH_LINK_SPEED_10G |
-				ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G);
-
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+	dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+				RTE_ETH_LINK_SPEED_10G |
+				RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G);
+
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return 0;
 }
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 5a198f53fce7..f7bfac796c07 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -154,20 +154,20 @@ static struct rte_pci_driver rte_atl_pmd = {
 	.remove = eth_atl_pci_remove,
 };
 
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
-			| DEV_RX_OFFLOAD_IPV4_CKSUM \
-			| DEV_RX_OFFLOAD_UDP_CKSUM \
-			| DEV_RX_OFFLOAD_TCP_CKSUM \
-			| DEV_RX_OFFLOAD_MACSEC_STRIP \
-			| DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
-			| DEV_TX_OFFLOAD_IPV4_CKSUM \
-			| DEV_TX_OFFLOAD_UDP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_TSO \
-			| DEV_TX_OFFLOAD_MACSEC_INSERT \
-			| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
+			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define SFP_EEPROM_SIZE 0x100
 
@@ -488,7 +488,7 @@ atl_dev_start(struct rte_eth_dev *dev)
 	/* set adapter started */
 	hw->adapter_stopped = 0;
 
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -655,18 +655,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
 	uint32_t speed_mask = 0;
 
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
 	} else {
-		if (link_speeds & ETH_LINK_SPEED_10G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed_mask |= AQ_NIC_RATE_10G;
-		if (link_speeds & ETH_LINK_SPEED_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed_mask |= AQ_NIC_RATE_5G;
-		if (link_speeds & ETH_LINK_SPEED_1G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed_mask |= AQ_NIC_RATE_1G;
-		if (link_speeds & ETH_LINK_SPEED_2_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed_mask |=  AQ_NIC_RATE_2G5;
-		if (link_speeds & ETH_LINK_SPEED_100M)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed_mask |= AQ_NIC_RATE_100M;
 	}
 
@@ -1127,10 +1127,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
-	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 
 	return 0;
 }
@@ -1175,10 +1175,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 	u32 fc = AQ_NIC_FC_OFF;
 	int err = 0;
 
-	link.link_status = ETH_LINK_DOWN;
+	link.link_status = RTE_ETH_LINK_DOWN;
 	link.link_speed = 0;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 	memset(&old, 0, sizeof(old));
 
 	/* load old link status */
@@ -1198,8 +1198,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 		return 0;
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = hw->aq_link_status.mbps;
 
 	rte_eth_linkstatus_set(dev, &link);
@@ -1333,7 +1333,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1532,13 +1532,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	hw->aq_fw_ops->get_flow_control(hw, &fc);
 
 	if (fc == AQ_NIC_FC_OFF)
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (fc & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (fc & AQ_NIC_FC_TX)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 
 	return 0;
 }
@@ -1553,13 +1553,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (hw->aq_fw_ops->set_flow_control == NULL)
 		return -ENOTSUP;
 
-	if (fc_conf->mode == RTE_FC_NONE)
+	if (fc_conf->mode == RTE_ETH_FC_NONE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
-	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
-	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
-	else if (fc_conf->mode == RTE_FC_FULL)
+	else if (fc_conf->mode == RTE_ETH_FC_FULL)
 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
 
 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1727,14 +1727,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
 
-	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
 
-	if (mask & ETH_VLAN_EXTEND_MASK)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
 		ret = -ENOTSUP;
 
 	return ret;
@@ -1750,10 +1750,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 	PMD_INIT_FUNC_TRACE();
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
 		break;
 	default:
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
index fbc9917ed30d..ed9ef9f0cc52 100644
--- a/drivers/net/atlantic/atl_ethdev.h
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -11,15 +11,15 @@
 #include "hw_atl/hw_atl_utils.h"
 
 #define ATL_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define ATL_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct atl_adapter *)adapter)->hw)
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 0d3460383a50..2ff426892df2 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
 	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_IPV4_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
 	/* allocate memory for the software ring */
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 932ec90265cf..5d94db02c506 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1998,9 +1998,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	/* Setup required number of queues */
 	_avp_set_queue_counts(eth_dev);
 
-	mask = (ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+	mask = (RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 	ret = avp_vlan_offload_set(eth_dev, mask);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2140,8 +2140,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
-	link->link_speed = ETH_SPEED_NUM_10G;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_speed = RTE_ETH_SPEED_NUM_10G;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_status = !!(avp->flags & AVP_F_LINKUP);
 
 	return -1;
@@ -2191,8 +2191,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
 	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
 	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	}
 
 	return 0;
@@ -2205,9 +2205,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	uint64_t offloads = dev_conf->rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2216,13 +2216,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
 
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index ca32ad641873..3aaa2193272f 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
 	pdata->rss_hf = rss_conf->rss_hf;
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 }
 
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 0250256830ac..dab0c6775d1d 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
 	struct axgbe_port *pdata =  dev->data->dev_private;
 	/* Checksum offload to hardware */
 	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_CHECKSUM;
+				RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return 0;
 }
 
@@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		pdata->rss_enable = 1;
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		pdata->rss_enable = 0;
 	else
 		return  -1;
@@ -385,7 +385,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
 
 	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 				max_pkt_len > pdata->rx_buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -521,8 +521,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		pdata->rss_table[i] = reta_conf[idx].reta[shift];
@@ -552,8 +552,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		reta_conf[idx].reta[shift] = pdata->rss_table[i];
@@ -590,13 +590,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
 
-	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Set the RSS options */
@@ -765,7 +765,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
 	link.link_status = pdata->phy_link;
 	link.link_speed = pdata->phy_speed;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
 		PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1208,24 +1208,24 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
 	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
 	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
-	dev_info->speed_capa =  ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_SCATTER	  |
-		DEV_RX_OFFLOAD_KEEP_CRC;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_SCATTER	  |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
 		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1262,13 +1262,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	fc.autoneg = pdata->pause_autoneg;
 
 	if (pdata->rx_pause && pdata->tx_pause)
-		fc.mode = RTE_FC_FULL;
+		fc.mode = RTE_ETH_FC_FULL;
 	else if (pdata->rx_pause)
-		fc.mode = RTE_FC_RX_PAUSE;
+		fc.mode = RTE_ETH_FC_RX_PAUSE;
 	else if (pdata->tx_pause)
-		fc.mode = RTE_FC_TX_PAUSE;
+		fc.mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc.mode = RTE_FC_NONE;
+		fc.mode = RTE_ETH_FC_NONE;
 
 	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
 	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1298,13 +1298,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	AXGMAC_IOWRITE(pdata, reg, reg_val);
 	fc.mode = fc_conf->mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 	} else {
@@ -1386,15 +1386,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	fc.mode = pfc_conf->fc.mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1830,8 +1830,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+	case RTE_ETH_VLAN_TYPE_INNER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
 		if (qinq) {
 			if (tpid != 0x8100 && tpid != 0x88a8)
 				PMD_DRV_LOG(ERR,
@@ -1848,8 +1848,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    "Inner type not supported in single tag\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
 		if (qinq) {
 			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
 			/*Enable outer VLAN tag*/
@@ -1866,11 +1866,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 					    "tag supported 0x8100/0x88A8\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_MAX:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+	case RTE_ETH_VLAN_TYPE_MAX:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
 		break;
-	case ETH_VLAN_TYPE_UNKNOWN:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+	case RTE_ETH_VLAN_TYPE_UNKNOWN:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
 		break;
 	}
 	return 0;
@@ -1904,8 +1904,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_stripping(pdata);
@@ -1915,8 +1915,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_stripping(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_filtering(pdata);
@@ -1926,14 +1926,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_filtering(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
 			axgbe_vlan_extend_enable(pdata);
 			/* Set global registers with default ethertype*/
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					    RTE_ETHER_TYPE_VLAN);
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					    RTE_ETHER_TYPE_VLAN);
 		} else {
 			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index a6226729fe4d..0a3e1c59df1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -97,12 +97,12 @@
 
 /* Receive Side Scaling */
 #define AXGBE_RSS_OFFLOAD  ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define AXGBE_RSS_HASH_KEY_SIZE		40
 #define AXGBE_RSS_MAX_TABLE_SIZE	256
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
index 4f98e695ae74..59fa9175aded 100644
--- a/drivers/net/axgbe/axgbe_mdio.c
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
 		pdata->an_int = 0;
 		axgbe_an73_clear_interrupts(pdata);
 		pdata->eth_dev->data->dev_link.link_status =
-			ETH_LINK_DOWN;
+			RTE_ETH_LINK_DOWN;
 	} else if (pdata->an_state == AXGBE_AN_ERROR) {
 		PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
 			    cur_state);
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index c8618d2d6daa..aa2c27ebaa49 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		(DMA_CH_INC * rxq->queue_id));
 	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
 						  DMA_CH_RDTR_LO);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 567ea2382864..78fc717ec44a 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
 	link.link_speed = sc->link_vars.line_speed;
 	switch (sc->link_vars.duplex) {
 		case DUPLEX_FULL:
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			break;
 		case DUPLEX_HALF:
-			link.link_duplex = ETH_LINK_HALF_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 			break;
 	}
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	link.link_status = sc->link_vars.link_up;
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -408,7 +408,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
 		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
 				"VF device is no longer operational");
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	}
 
 	return ret;
@@ -534,7 +534,7 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
 
 	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
 	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -669,7 +669,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 	bnx2x_load_firmware(sc);
 	assert(sc->firmware);
 
-	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		sc->udp_rss = 1;
 
 	sc->rx_budget = BNX2X_RX_BUDGET;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 6743cf92b0e6..39bd739c7bc9 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -569,37 +569,37 @@ struct bnxt_rep_info {
 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
 
 #define BNXT_ETH_RSS_SUPPORT (	\
-	ETH_RSS_IPV4 |		\
-	ETH_RSS_NONFRAG_IPV4_TCP |	\
-	ETH_RSS_NONFRAG_IPV4_UDP |	\
-	ETH_RSS_IPV6 |		\
-	ETH_RSS_NONFRAG_IPV6_TCP |	\
-	ETH_RSS_NONFRAG_IPV6_UDP |	\
-	ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_CKSUM | \
-				     DEV_TX_OFFLOAD_UDP_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_TSO | \
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_QINQ_INSERT | \
-				     DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
-				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_TCP_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_KEEP_CRC | \
-				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-				     DEV_RX_OFFLOAD_TCP_LRO | \
-				     DEV_RX_OFFLOAD_SCATTER | \
-				     DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RSS_IPV4 |		\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
+	RTE_ETH_RSS_IPV6 |		\
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
+	RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+				     RTE_ETH_RX_OFFLOAD_SCATTER | \
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index f385723a9f65..2791a5c62db1 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 		goto err_out;
 
 	/* Alloc RSS context only if RSS mode is enabled */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
 
 		/* RSS table size in Thor is 512.
@@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	 * setting is not available at this time, it will not be
 	 * configured correctly in the CFA.
 	 */
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 
 	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
-				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
 				    true : false);
 	if (rc)
 		goto err_out;
@@ -923,35 +923,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
 		link_speed = bp->link_info->support_pam4_speeds;
 
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
-		speed_capa |= ETH_LINK_SPEED_2_5G;
+		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
-		speed_capa |= ETH_LINK_SPEED_20G;
+		speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	if (bp->link_info->auto_mode ==
 	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -995,14 +995,14 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
 				    dev_info->tx_queue_offload_capa;
 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
@@ -1049,8 +1049,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	 */
 
 	/* VMDq resources */
-	vpool = 64; /* ETH_64_POOLS */
-	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	vpool = 64; /* RTE_ETH_64_POOLS */
+	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
 	for (i = 0; i < 4; vpool >>= 1, i++) {
 		if (max_vnics > vpool) {
 			for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1145,15 +1145,15 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
 		goto resource_error;
 
-	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
 		goto resource_error;
 
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
 	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
@@ -1182,7 +1182,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
 			eth_dev->data->port_id,
 			(uint32_t)link->link_speed,
-			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			("full-duplex") : ("half-duplex\n"));
 	else
 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1199,10 +1199,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return 1;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		return 1;
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1247,15 +1247,15 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 	 * a limited subset have been enabled.
 	 */
 	if (eth_dev->data->dev_conf.rxmode.offloads &
-		~(DEV_RX_OFFLOAD_VLAN_STRIP |
-		  DEV_RX_OFFLOAD_KEEP_CRC |
-		  DEV_RX_OFFLOAD_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_TCP_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_RSS_HASH |
-		  DEV_RX_OFFLOAD_VLAN_FILTER))
+		~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		  RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
 		goto use_scalar_rx;
 
 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1307,7 +1307,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 	 * or tx offloads.
 	 */
 	if (eth_dev->data->scattered_rx ||
-	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+	    (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
 	    BNXT_TRUFLOW_EN(bp))
 		goto use_scalar_tx;
 
@@ -1608,10 +1608,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
 	bnxt_link_update_op(eth_dev, 1);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		vlan_mask |= ETH_VLAN_FILTER_MASK;
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		vlan_mask |= ETH_VLAN_STRIP_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
 	if (rc)
 		goto error;
@@ -1833,8 +1833,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		/* Retrieve link info from hardware */
 		rc = bnxt_get_hwrm_link_config(bp, &new);
 		if (rc) {
-			new.link_speed = ETH_LINK_SPEED_100M;
-			new.link_duplex = ETH_LINK_FULL_DUPLEX;
+			new.link_speed = RTE_ETH_LINK_SPEED_100M;
+			new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR,
 				"Failed to retrieve link rc = 0x%x!\n", rc);
 			goto out;
@@ -2028,7 +2028,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	if (!vnic->rss_table)
 		return -EINVAL;
 
-	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return -EINVAL;
 
 	if (reta_size != tbl_size) {
@@ -2041,8 +2041,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	for (i = 0; i < reta_size; i++) {
 		struct bnxt_rx_queue *rxq;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (!(reta_conf[idx].mask & (1ULL << sft)))
 			continue;
@@ -2095,8 +2095,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 	}
 
 	for (idx = 0, i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
@@ -2134,7 +2134,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	 * If RSS enablement were different than dev_configure,
 	 * then return -EINVAL
 	 */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
@@ -2152,7 +2152,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
 	vnic->hash_mode =
 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
-					    ETH_RSS_LEVEL(rss_conf->rss_hf));
+					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
 
 	/*
 	 * If hashkey is not specified, use the previously configured
@@ -2197,30 +2197,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 		hash_types = vnic->hash_type;
 		rss_conf->rss_hf = 0;
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_IPV4;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_IPV6;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 		}
@@ -2260,17 +2260,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 		fc_conf->autoneg = 1;
 	switch (bp->link_info->pause) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	}
 	return 0;
@@ -2293,11 +2293,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		bp->link_info->auto_pause = 0;
 		bp->link_info->force_pause = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@@ -2308,7 +2308,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
 		}
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@@ -2319,7 +2319,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
 		}
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@@ -2350,7 +2350,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2364,7 +2364,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		tunnel_type =
 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2413,7 +2413,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (!bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2430,7 +2430,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
 		port = bp->vxlan_fw_dst_port_id;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (!bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2608,7 +2608,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 	int rc;
 
 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
-	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		/* Remove any VLAN filters programmed */
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
@@ -2628,7 +2628,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 		bnxt_add_vlan_filter(bp, 0);
 	}
 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
 
 	return 0;
 }
@@ -2641,7 +2641,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 
 	/* Destroy vnic filters and vnic */
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
 	}
@@ -2680,7 +2680,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		rc = bnxt_add_vlan_filter(bp, 0);
 		if (rc)
 			return rc;
@@ -2698,7 +2698,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
 
 	return rc;
 }
@@ -2718,22 +2718,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 	if (!dev->data->dev_started)
 		return 0;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering */
 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
 		else
 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@@ -2748,10 +2748,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 {
 	struct bnxt *bp = dev->data->dev_private;
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
-	if (vlan_type != ETH_VLAN_TYPE_INNER &&
-	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	    vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -2763,7 +2763,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		return -EINVAL;
 	}
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		switch (tpid) {
 		case RTE_ETHER_TYPE_QINQ:
 			bp->outer_tpid_bd =
@@ -2791,7 +2791,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		}
 		bp->outer_tpid_bd |= tpid;
 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer vlan in QinQ\n");
 		return -EINVAL;
@@ -2831,7 +2831,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
 	bnxt_del_dflt_mac_filter(bp, vnic);
 
 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		/* This filter will allow only untagged packets */
 		rc = bnxt_add_vlan_filter(bp, 0);
 	} else {
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index b2ebb5634e3a..ced697a73980 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -978,7 +978,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -1177,7 +1177,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
 	}
 
 	/* If RSS types is 0, use a best effort configuration */
-	types = rss->types ? rss->types : ETH_RSS_IPV4;
+	types = rss->types ? rss->types : RTE_ETH_RSS_IPV4;
 
 	hash_type = bnxt_rte_to_hwrm_hash_types(types);
 
@@ -1322,7 +1322,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 
 		rxq = bp->rx_queues[act_q->index];
 
-		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+		if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
 			goto use_vnic;
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 181e607d7bf8..82e89b7c8af7 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	uint16_t j = dst_id - 1;
 
 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
-	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+	if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
 	    conf->pool_map[j].pools & (1UL << j)) {
 		PMD_DRV_LOG(DEBUG,
 			"Add vlan %u to vmdq pool %u\n",
@@ -2979,12 +2979,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 {
 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
-	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+	if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
 	switch (conf_link_speed) {
-	case ETH_LINK_SPEED_10M_HD:
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
 	}
@@ -3001,51 +3001,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 {
 	uint16_t eth_link_speed = 0;
 
-	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
-		return ETH_LINK_SPEED_AUTONEG;
+	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+		return RTE_ETH_LINK_SPEED_AUTONEG;
 
-	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_100M:
-	case ETH_LINK_SPEED_100M_HD:
+	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
 		break;
-	case ETH_LINK_SPEED_2_5G:
+	case RTE_ETH_LINK_SPEED_2_5G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
 		break;
-	case ETH_LINK_SPEED_20G:
+	case RTE_ETH_LINK_SPEED_20G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 		break;
@@ -3058,11 +3058,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 	return eth_link_speed;
 }
 
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
-		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
-		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
-		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+		RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+		RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+		RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+		RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
 
 static int bnxt_validate_link_speed(struct bnxt *bp)
 {
@@ -3071,13 +3071,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
 	uint32_t link_speed_capa;
 	uint32_t one_speed;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG)
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
 		return 0;
 
 	link_speed_capa = bnxt_get_speed_capabilities(bp);
 
-	if (link_speed & ETH_LINK_SPEED_FIXED) {
-		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+	if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+		one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
 
 		if (one_speed & (one_speed - 1)) {
 			PMD_DRV_LOG(ERR,
@@ -3107,71 +3107,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 {
 	uint16_t ret = 0;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
 		if (bp->link_info->support_speeds)
 			return bp->link_info->support_speeds;
 		link_speed = BNXT_SUPPORTED_SPEEDS;
 	}
 
-	if (link_speed & ETH_LINK_SPEED_100M)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_100M_HD)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_1G)
+	if (link_speed & RTE_ETH_LINK_SPEED_1G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
-	if (link_speed & ETH_LINK_SPEED_2_5G)
+	if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
-	if (link_speed & ETH_LINK_SPEED_10G)
+	if (link_speed & RTE_ETH_LINK_SPEED_10G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
-	if (link_speed & ETH_LINK_SPEED_20G)
+	if (link_speed & RTE_ETH_LINK_SPEED_20G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
-	if (link_speed & ETH_LINK_SPEED_25G)
+	if (link_speed & RTE_ETH_LINK_SPEED_25G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
-	if (link_speed & ETH_LINK_SPEED_40G)
+	if (link_speed & RTE_ETH_LINK_SPEED_40G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
-	if (link_speed & ETH_LINK_SPEED_50G)
+	if (link_speed & RTE_ETH_LINK_SPEED_50G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
-	if (link_speed & ETH_LINK_SPEED_100G)
+	if (link_speed & RTE_ETH_LINK_SPEED_100G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
-	if (link_speed & ETH_LINK_SPEED_200G)
+	if (link_speed & RTE_ETH_LINK_SPEED_200G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 	return ret;
 }
 
 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 {
-	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+	uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	switch (hw_link_speed) {
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
-		eth_link_speed = ETH_SPEED_NUM_100M;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
-		eth_link_speed = ETH_SPEED_NUM_1G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
-		eth_link_speed = ETH_SPEED_NUM_2_5G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
-		eth_link_speed = ETH_SPEED_NUM_10G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
-		eth_link_speed = ETH_SPEED_NUM_20G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
-		eth_link_speed = ETH_SPEED_NUM_25G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
-		eth_link_speed = ETH_SPEED_NUM_40G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
-		eth_link_speed = ETH_SPEED_NUM_50G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
-		eth_link_speed = ETH_SPEED_NUM_100G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
-		eth_link_speed = ETH_SPEED_NUM_200G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
 	default:
@@ -3184,16 +3184,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 
 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 {
-	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+	uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (hw_link_duplex) {
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
 		/* FALLTHROUGH */
-		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
-		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@@ -3222,12 +3222,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 		link->link_speed =
 			bnxt_parse_hw_link_speed(link_info->link_speed);
 	else
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
 	link->link_status = link_info->link_up;
 	link->link_autoneg = link_info->auto_mode ==
 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
-		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+		RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
 exit:
 	return rc;
 }
@@ -3253,7 +3253,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	if (BNXT_CHIP_P5(bp) &&
-	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+	    dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
 		/* 40G is not supported as part of media auto detect.
 		 * The speed should be forced and autoneg disabled
 		 * to configure 40G speed.
@@ -3344,7 +3344,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
 	HWRM_CHECK_RESULT();
 
-	bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+	bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
 
 	svif_info = rte_le_to_cpu_16(resp->svif_info);
 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index b7e88e013a84..1c07db3ca9c5 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -537,7 +537,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 08cefa1baaef..7940d489a102 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -187,7 +187,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 			rx_ring_info->rx_ring_struct->ring_size *
 			AGG_RING_SIZE_FACTOR)) : 0;
 
-		if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+		if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 			int tpa_max = BNXT_TPA_MAX_AGGS(bp);
 
 			tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@@ -283,7 +283,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 					    ag_bitmap_start, ag_bitmap_len);
 
 			/* TPA info */
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 				rx_ring_info->tpa_info =
 					((struct bnxt_tpa_info *)
 					 ((char *)mz->addr + tpa_info_start));
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 38ec4aa14b77..1456f8b54ffa 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -52,13 +52,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 	bp->nr_vnics = 0;
 
 	/* Multi-queue mode */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
 
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* FALLTHROUGH */
-			/* ETH_8/64_POOLs */
+			/* RTE_ETH_8/64_POOLs */
 			pools = conf->nb_queue_pools;
@@ -66,14 +66,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 			max_pools = RTE_MIN(bp->max_vnics,
 					    RTE_MIN(bp->max_l2_ctx,
 					    RTE_MIN(bp->max_rsscos_ctx,
-						    ETH_64_POOLS)));
+						    RTE_ETH_64_POOLS)));
 			PMD_DRV_LOG(DEBUG,
 				    "pools = %u max_pools = %u\n",
 				    pools, max_pools);
 			if (pools > max_pools)
 				pools = max_pools;
 			break;
-		case ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_RSS:
 			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
 			break;
 		default:
@@ -111,7 +111,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 				    ring_idx, rxq, i, vnic);
 		}
 		if (i == 0) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
 				bp->eth_dev->data->promiscuous = 1;
 				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 			}
@@ -121,8 +121,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 		vnic->end_grp_id = end_grp_id;
 
 		if (i) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
-			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
 				vnic->rss_dflt_cr = true;
 			goto skip_filter_allocation;
 		}
@@ -147,14 +147,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
 	bp->rx_num_qs_per_vnic = nb_q_per_grp;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
 
 		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
 			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
 
 		for (i = 0; i < bp->nr_vnics; i++) {
-			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
 
 			vnic = &bp->vnic_info[i];
 			vnic->hash_type =
@@ -363,7 +363,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
 	rxq->port_id = eth_dev->data->port_id;
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -478,7 +478,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
 
 		if (BNXT_HAS_RING_GRPS(bp)) {
@@ -549,7 +549,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq->rx_started = false;
 	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (BNXT_HAS_RING_GRPS(bp))
 			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index aeacc60a0127..eb555c4545e6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
 	offloads = dev_conf->rxmode.offloads;
 
-	outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
 
 	/* Initialize ol_flags table. */
 	pt = rxr->ol_flags_table;
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
index d08854ff61e2..e4905b4fd169 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
@@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 9b9489a695a2..0627fd212d0a 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static inline void
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 13211060cf0e..f15e2d3b4ed4 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index 6e563053260a..ffd560166cac 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 9e45ddd7a82e..f2fcaf53021c 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -353,7 +353,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@@ -479,7 +479,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 26253a7e17f2..c63cf4b943fa 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 {
 	uint16_t hwrm_type = 0;
 
-	if (rte_type & ETH_RSS_IPV4)
+	if (rte_type & RTE_ETH_RSS_IPV4)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-	if (rte_type & ETH_RSS_IPV6)
+	if (rte_type & RTE_ETH_RSS_IPV6)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 
 	return hwrm_type;
@@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
 {
 	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
-	bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
-	bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP));
+	bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+	bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP));
 	bool l3_only = l3 && !l4;
 	bool l3_and_l4 = l3 && l4;
 
@@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
 	 * return default hash mode.
 	 */
 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
-		return ETH_RSS_LEVEL_PMD_DEFAULT;
+		return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
 	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
 		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_INNERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
 	else
-		rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+		rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	return rss_level;
 }
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index f71543810970..77ecbef04c3d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 	if (vf >= bp->pdev->max_vfs)
 		return -EINVAL;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
 		PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
 		return -ENOTSUP;
 	}
 
 	/* Is this really the correct mapping?  VFd seems to think it is. */
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		flag |= BNXT_VNIC_INFO_PROMISC;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		flag |= BNXT_VNIC_INFO_BCAST;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
 
 	if (on)
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index fc179a2732ac..8b104b639184 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -167,8 +167,8 @@ struct bond_dev_private {
 	struct rte_eth_desc_lim tx_desc_lim;	/**< Tx descriptor limits */
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[52];				/**< 52-byte hash key buffer. */
 	uint8_t rss_key_len;				/**< hash key length in bytes. */
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2029955c1092..ca50583d62d8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
 	uint16_t key_speed;
 
 	switch (speed) {
-	case ETH_SPEED_NUM_NONE:
+	case RTE_ETH_SPEED_NUM_NONE:
 		key_speed = 0x00;
 		break;
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		key_speed = BOND_LINK_SPEED_KEY_10M;
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		key_speed = BOND_LINK_SPEED_KEY_100M;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		key_speed = BOND_LINK_SPEED_KEY_1000M;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		key_speed = BOND_LINK_SPEED_KEY_10G;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		key_speed = BOND_LINK_SPEED_KEY_20G;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		key_speed = BOND_LINK_SPEED_KEY_40G;
 		break;
 	default:
@@ -887,7 +887,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
 		if (ret >= 0 && link_info.link_status != 0) {
 			key = link_speed_key(link_info.link_speed) << 1;
-			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+			if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
 				key |= BOND_LINK_FULL_DUPLEX_KEY;
 		} else {
 			key = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 5140ef14c2ee..84943cffe2bb 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
 
 	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
 	if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
 		return 0;
 
 	internals = bonded_eth_dev->data->dev_private;
@@ -592,7 +592,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 			return -1;
 		}
 
-		 if (link_props.link_status == ETH_LINK_UP) {
+		if (link_props.link_status == RTE_ETH_LINK_UP) {
 			if (internals->active_slave_count == 0 &&
 			    !internals->user_defined_primary_port)
 				bond_ethdev_primary_set(internals,
@@ -727,7 +727,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
 		internals->tx_offload_capa = 0;
 		internals->rx_queue_offload_capa = 0;
 		internals->tx_queue_offload_capa = 0;
-		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 		internals->reta_size = 0;
 		internals->candidate_max_rx_pktlen = 0;
 		internals->max_rx_pktlen = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 8d038ba6b6c4..834a5937b3aa 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1369,8 +1369,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
 		 * In any other mode the link properties are set to default
 		 * values of AUTONEG/DUPLEX
 		 */
-		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
-		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	}
 }
 
@@ -1700,7 +1700,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	/* If RSS is enabled for bonding, try to enable it for slaves  */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		/* rss_key won't be empty if RSS is configured in bonded dev */
 		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
 					internals->rss_key_len;
@@ -1714,12 +1714,12 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	slave_eth_dev->data->dev_conf.rxmode.mtu =
 			bonded_eth_dev->data->dev_conf.rxmode.mtu;
@@ -1823,7 +1823,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* If RSS is enabled for bonding, synchronize RETA */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int i;
 		struct bond_dev_private *internals;
 
@@ -1946,7 +1946,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 1;
 
 	internals = eth_dev->data->dev_private;
@@ -2086,7 +2086,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 
 	internals->link_status_polling_enabled = 0;
@@ -2416,15 +2416,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 
 	bond_ctx = ethdev->data->dev_private;
 
-	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	if (ethdev->data->dev_started == 0 ||
 			bond_ctx->active_slave_count == 0) {
-		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 
-	ethdev->data->dev_link.link_status = ETH_LINK_UP;
+	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	if (wait_to_complete)
 		link_update = rte_eth_link_get;
@@ -2449,7 +2449,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 					  &slave_link);
 			if (ret < 0) {
 				ethdev->data->dev_link.link_speed =
-					ETH_SPEED_NUM_NONE;
+					RTE_ETH_SPEED_NUM_NONE;
 				RTE_BOND_LOG(ERR,
 					"Slave (port %u) link get failed: %s",
 					bond_ctx->active_slaves[idx],
@@ -2491,7 +2491,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 		 * In these modes the maximum theoretical link speed is the sum
 		 * of all the slaves
 		 */
-		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		one_link_update_succeeded = false;
 
 		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2865,7 +2865,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 			goto link_update;
 
 		/* check link state properties if bonded link is up */
-		if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+		if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 			if (link_properties_valid(bonded_eth_dev, &link) != 0)
 				RTE_BOND_LOG(ERR, "Invalid link properties "
 					     "for slave %d in bonding mode %d",
@@ -2881,7 +2881,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
 			bonded_eth_dev->data->dev_link.link_status =
-								ETH_LINK_UP;
+								RTE_ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 			lsc_flag = 1;
 
@@ -2973,12 +2973,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < reta_count; i++) {
 		internals->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -3011,8 +3011,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
 
@@ -3274,7 +3274,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	internals->max_rx_pktlen = 0;
 
 	/* Initially allow to choose any offload type */
-	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 
 	memset(&internals->default_rxconf, 0,
 	       sizeof(internals->default_rxconf));
@@ -3501,7 +3501,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	 * set key to the value specified in port RSS configuration.
 	 * Fall back to default RSS key if the key is not specified
 	 */
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		struct rte_eth_rss_conf *rss_conf =
 			&dev->data->dev_conf.rx_adv_conf.rss_conf;
 		if (rss_conf->rss_key != NULL) {
@@ -3526,9 +3526,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
-			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 				internals->reta_conf[i].reta[j] =
-						(i * RTE_RETA_GROUP_SIZE + j) %
+						(i * RTE_ETH_RETA_GROUP_SIZE + j) %
 						dev->data->nb_rx_queues;
 		}
 	}
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 9dfea99db9b2..d52f8ffecf23 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index d6af54b56de6..5d603514c045 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -77,12 +77,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
index eb962ef08cab..5e6c5ee11188 100644
--- a/drivers/net/cnxk/cn10k_tx.c
+++ b/drivers/net/cnxk/cn10k_tx.c
@@ -78,11 +78,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 08c86f9e6b7b..17f8f6debbc8 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -298,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
@@ -553,17 +553,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index 5c4387e74e0b..8d504c4a6d92 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -77,12 +77,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index e5691a2a7e16..f3f19fed9780 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -77,11 +77,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index c94fc505fef1..330256a0d34b 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 
 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	uint32_t speed_capa;
 
 	/* Auto negotiation disabled */
-	speed_capa = ETH_LINK_SPEED_FIXED;
+	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
-		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return speed_capa;
@@ -65,7 +65,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 	struct roc_nix *nix = &dev->nix;
 	int i, rc = 0;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup Inline Inbound */
 		rc = roc_nix_inl_inb_init(nix);
 		if (rc) {
@@ -80,8 +80,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 		cnxk_nix_inb_mode_set(dev, true);
 	}
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		struct plt_bitmap *bmap;
 		size_t bmap_sz;
 		void *mem;
@@ -100,8 +100,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 
 		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
 
-		/* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
-		if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+		/* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
 			goto done;
 
 		rc = -ENOMEM;
@@ -136,7 +136,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 done:
 	return 0;
 cleanup:
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		rc |= roc_nix_inl_inb_fini(nix);
 	return rc;
 }
@@ -150,7 +150,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	int rc, ret = 0;
 
 	/* Cleanup Inline inbound */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy inbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -167,8 +167,8 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	}
 
 	/* Cleanup Inline outbound */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy outbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
@@ -210,8 +210,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 }
 
@@ -241,7 +241,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct rte_eth_fc_conf fc_conf = {0};
 	int rc;
 
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -249,10 +249,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
@@ -273,11 +273,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (roc_model_is_cn96_ax() &&
 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
-	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_cfg.mode =
-				(fc_cfg.mode == RTE_FC_FULL ||
-				fc_cfg.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_cfg.mode == RTE_ETH_FC_FULL ||
+				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -320,7 +320,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
 	 * Maximum three segments can be supported with W8; choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -348,7 +348,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* When Tx Security offload is enabled, increase tx desc count by
 	 * max possible outbound desc count.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		nb_desc += dev->outb.nb_desc;
 
 	/* Setup ROC SQ */
@@ -467,7 +467,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * to avoid meta packet drop as LBK does not currently support
 	 * backpressure.
 	 */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
 		uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
 
 		/* Use current RQ's aura limit if inl rq is not available */
@@ -529,7 +529,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq_sp->qconf.nb_desc = nb_desc;
 	rxq_sp->qconf.mp = mp;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup rq reference for inline dev if present */
 		rc = roc_nix_inl_dev_rq_get(rq);
 		if (rc)
@@ -547,7 +547,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
 		rc = cnxk_nix_tsc_convert(dev);
 		if (rc) {
 			plt_err("Failed to calculate delta and freq mult");
@@ -586,7 +586,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_nix_dbg("Releasing rxq %u", qid);
 
 	/* Release rq reference for inline dev if present */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		roc_nix_inl_dev_rq_put(rq);
 
 	/* Cleanup ROC RQ */
@@ -625,24 +625,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->ethdev_rss_hf = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -651,34 +651,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -704,7 +704,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
 	uint64_t rss_hf;
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 
@@ -916,8 +916,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
 
 	/* Nothing much to do if offload is not enabled */
 	if (!(dev->tx_offloads &
-	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
 		return 0;
 
 	/* Setup LSO formats in AF. It's a no-op if other ethdev has
@@ -965,13 +965,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
@@ -1007,7 +1007,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Prepare rx cfg */
 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
 	}
@@ -1015,7 +1015,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
 		/* Disable drop re if rx offload security is enabled and
 		 * platform does not support it.
@@ -1401,12 +1401,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled on PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
 	else
 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		rc = rte_mbuf_dyn_rx_timestamp_register
 			(&dev->tstamp.tstamp_dynfield_offset,
 			 &dev->tstamp.rx_tstamp_dynflag);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 2304af6ffa8b..a4247e52523a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -57,41 +57,44 @@
 	 CNXK_NIX_TX_NB_SEG_MAX)
 
 #define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
-	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
-	 ETH_RSS_L4_DST_ONLY)
+	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |                   \
+	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 #define CNXK_NIX_RSS_OFFLOAD                                                   \
-	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
-	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
-	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |                 \
+	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL |             \
+	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST |                 \
+	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
 
 #define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
-	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
-	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
-	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
-	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
-	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
-	 DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
+	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |          \
+	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT |             \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |                 \
+	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |                  \
+	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |        \
+	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS |              \
+	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
-	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
-	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
-	 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH |            \
-	 DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP |                \
-	 DEV_RX_OFFLOAD_SECURITY)
+	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |         \
+	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER |    \
+	 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH |    \
+	 RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP |        \
+	 RTE_ETH_RX_OFFLOAD_SECURITY)
 
 #define RSS_IPV4_ENABLE                                                        \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE                                                        \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
-	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE                                                     \
-	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS 3
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index c0b949e21ab0..e068f553495c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -104,11 +104,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
 		val = ROC_NIX_RSS_RETA_SZ_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
 		val = ROC_NIX_RSS_RETA_SZ_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
 		val = ROC_NIX_RSS_RETA_SZ_256;
 	else
 		val = ROC_NIX_RSS_RETA_SZ_64;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index d0924df76152..67464302653d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -81,24 +81,24 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-		{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
-		{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
-		{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
-		{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
-		{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
-		{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
-		{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
-		{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-		{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-		{DEV_RX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
-		{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
-		{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+		{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
 						 "Scalar, Rx Offloads:"
@@ -142,28 +142,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-		{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-		{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
-		{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
-		{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
-		{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
-		{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
-		{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
-		{DEV_TX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
 						 "Scalar, Tx Offloads:"
@@ -203,8 +203,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	enum rte_eth_fc_mode mode_map[] = {
-					   RTE_FC_NONE, RTE_FC_RX_PAUSE,
-					   RTE_FC_TX_PAUSE, RTE_FC_FULL
+					   RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+					   RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
 					  };
 	struct roc_nix *nix = &dev->nix;
 	int mode;
@@ -264,10 +264,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -408,13 +408,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		plt_err("Scatter offload is not enabled for mtu");
 		goto exit;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
 		plt_err("Greater than maximum supported packet length");
 		goto exit;
@@ -734,8 +734,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -770,8 +770,8 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
 		goto fail;
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = reta[idx];
 			idx++;
@@ -804,7 +804,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 	if (rss_conf->rss_key)
 		roc_nix_rss_key_set(nix, rss_conf->rss_key);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
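
[Editor note: the cnxk flow control hunks above all follow the same rx_pause/tx_pause mapping. For reference, a minimal standalone sketch of that mapping under the new names; this helper is illustrative and not part of the patch, assuming only DPDK's <rte_ethdev.h>:]

#include <stdbool.h>
#include <rte_ethdev.h>

/* Map pause-frame state to the namespaced flow control enum. */
static enum rte_eth_fc_mode
fc_mode_from_pause(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return RTE_ETH_FC_FULL;
	if (rx_pause)
		return RTE_ETH_FC_RX_PAUSE;
	if (tx_pause)
		return RTE_ETH_FC_TX_PAUSE;
	return RTE_ETH_FC_NONE;
}
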
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index 6a7080167598..f10a502826c6 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		plt_info("Port %d: Link Up - speed %u Mbps - %s",
 			 (int)(eth_dev->data->port_id),
 			 (uint32_t)link->link_speed,
-			 link->link_duplex == ETH_LINK_FULL_DUPLEX
+			 link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 				 ? "full-duplex"
 				 : "half-duplex");
 	else
@@ -89,7 +89,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
 
 	eth_link.link_status = link->status;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	/* Print link info */
@@ -117,17 +117,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		return 0;
 
 	if (roc_nix_is_lbk(&dev->nix)) {
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_100G;
-		link.link_autoneg = ETH_LINK_FIXED;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		rc = roc_nix_mac_link_info_get(&dev->nix, &info);
 		if (rc)
 			return rc;
 		link.link_status = info.status;
 		link.link_speed = info.speed;
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 		if (info.full_duplex)
 			link.link_duplex = info.full_duplex;
 	}
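
[Editor note: as a reference for the link constants renamed above, a hedged sketch of how a PMD fills struct rte_eth_link with them. The helper name and the 10G speed are illustrative; rte_eth_linkstatus_set() is the driver-side helper from the driver-only ethdev header:]

#include <string.h>
#include <stdbool.h>
#include <rte_ethdev.h>

static int
report_link(struct rte_eth_dev *eth_dev, bool up)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link.link_speed = up ? RTE_ETH_SPEED_NUM_10G : RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	/* Atomically publish the new state to the ethdev layer. */
	return rte_eth_linkstatus_set(eth_dev, &link);
}
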
diff --git a/drivers/net/cnxk/cnxk_ptp.c b/drivers/net/cnxk/cnxk_ptp.c
index 449489f599c4..139fea256ccd 100644
--- a/drivers/net/cnxk/cnxk_ptp.c
+++ b/drivers/net/cnxk/cnxk_ptp.c
@@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 	dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	rc = roc_nix_ptp_rx_ena_dis(nix, true);
 	if (!rc) {
@@ -257,7 +257,7 @@ int
 cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+	uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	struct roc_nix *nix = &dev->nix;
 	int rc = 0;
 
diff --git a/drivers/net/cnxk/cnxk_rte_flow.c b/drivers/net/cnxk/cnxk_rte_flow.c
index 27defd2fa984..2dfc3730a0da 100644
--- a/drivers/net/cnxk/cnxk_rte_flow.c
+++ b/drivers/net/cnxk/cnxk_rte_flow.c
@@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
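
[Editor note: the validation above keys off the application's mq_mode. For context, a minimal application-side configuration that satisfies it; the hash fields chosen here are illustrative:]

#include <rte_ethdev.h>

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,	/* enable RSS distribution */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* let the PMD pick a key */
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
		},
	},
};
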
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 37625c5bfb69..dbcbfaf68a30 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -28,31 +28,31 @@
 #define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
 
 #define CXGBE_DEFAULT_RSS_KEY_LEN     40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-				ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
-				ETH_RSS_NONFRAG_IPV6_OTHER | \
-				ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
-				    ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
-				    ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+				RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+				    RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+				    RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
 
 /* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
-			   DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_TX_OFFLOAD_UDP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_TSO | \
-			   DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			   DEV_RX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_RX_OFFLOAD_UDP_CKSUM | \
-			   DEV_RX_OFFLOAD_TCP_CKSUM | \
-			   DEV_RX_OFFLOAD_SCATTER | \
-			   DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+			   RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+			   RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_SCATTER | \
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 /* Devargs filtermode and filtermask representation */
 enum cxgbe_devargs_filter_mode_flags {
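
[Editor note: masks like CXGBE_TX_OFFLOADS above are typically used to reject unsupported offload requests at configure time. A small sketch of that check; the helper is hypothetical, not from the driver:]

#include <stdint.h>
#include <errno.h>

static int
check_offloads(uint64_t requested, uint64_t supported)
{
	/* Any requested bit outside the supported mask is an error. */
	if (requested & ~supported)
		return -ENOTSUP;
	return 0;
}
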
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index f77b2976002c..4758321778d1 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 	}
 
 	new_link.link_status = cxgbe_force_linkup(adapter) ?
-			       ETH_LINK_UP : pi->link_cfg.link_ok;
+			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
 	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
@@ -374,7 +374,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
 			goto out;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 	else
 		eth_dev->data->scattered_rx = 0;
@@ -438,9 +438,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
@@ -1080,13 +1080,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		rx_pause = 1;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1099,12 +1099,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	u8 tx_pause = 0, rx_pause = 0;
 	int ret;
 
-	if (fc_conf->mode == RTE_FC_FULL) {
+	if (fc_conf->mode == RTE_ETH_FC_FULL) {
 		tx_pause = 1;
 		rx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
 		tx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
 		rx_pause = 1;
 	}
 
@@ -1200,9 +1200,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	}
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@@ -1246,8 +1246,8 @@ static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1277,8 +1277,8 @@ static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1479,7 +1479,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_100G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
 		}
@@ -1488,7 +1488,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_50G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
 		}
@@ -1497,7 +1497,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_25G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
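
[Editor note: the RETA hunks in this file and in cnxk share the same idx/shift arithmetic. As a reference, a standalone sketch of applying an update with the renamed group size; the helper is hypothetical:]

#include <stdint.h>
#include <rte_ethdev.h>

static void
apply_reta(uint16_t *reta, const struct rte_eth_rss_reta_entry64 *reta_conf,
	   uint16_t reta_size)
{
	uint16_t i, idx, shift;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* 64-entry group */
		shift = i % RTE_ETH_RETA_GROUP_SIZE; /* entry within group */
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;	/* entry not selected by caller */
		reta[i] = reta_conf[idx].reta[shift];
	}
}
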
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 91d6bb9bbcb0..f1ac32270961 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1670,7 +1670,7 @@ int cxgbe_link_start(struct port_info *pi)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
-			    !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+			    !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
 			    true);
 	if (ret == 0) {
 		ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@@ -1694,7 +1694,7 @@ int cxgbe_link_start(struct port_info *pi)
 	}
 
 	if (ret == 0 && cxgbe_force_linkup(adapter))
-		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return ret;
 }
 
@@ -1725,10 +1725,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
 	if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
 			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
@@ -1865,7 +1865,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
 {
 #define SET_SPEED(__speed_name) \
 	do { \
-		*speed_caps |= ETH_LINK_ ## __speed_name; \
+		*speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
 	} while (0)
 
 #define FW_CAPS_TO_SPEED(__fw_name) \
@@ -1952,7 +1952,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
 			      speed_caps);
 
 	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
-		*speed_caps |= ETH_LINK_SPEED_FIXED;
+		*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
 }
 
 /**
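
[Editor note: a short sketch of the speed_caps composition that the SET_SPEED()/FW_CAPS_TO_SPEED() macros above implement, including the fixed-speed flag. The helper and the advertised speeds are illustrative:]

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

static uint32_t
build_speed_capa(bool can_autoneg)
{
	uint32_t capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G;

	/* Without autonegotiation the port only does fixed speeds. */
	if (!can_autoneg)
		capa |= RTE_ETH_LINK_SPEED_FIXED;
	return capa;
}
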
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index c79cdb8d8ad7..89ea7dd47c0b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -54,29 +54,29 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		DPAA_PMD_DEBUG("enabling scatter mode");
 		fman_if_set_sg(dev->process_private, 1);
 		dev->data->scattered_rx = 1;
@@ -283,43 +283,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	/* Configure link only if link is UP*/
 	if (link->link_status) {
-		if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 			/* Start autoneg only if link is not in autoneg mode */
 			if (!link->link_autoneg)
 				dpaa_restart_link_autoneg(__fif->node_name);
-		} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-			switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
-			case ETH_LINK_SPEED_10M_HD:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+			case RTE_ETH_LINK_SPEED_10M_HD:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10M:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10M:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M_HD:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M_HD:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_1G:
-				speed = ETH_SPEED_NUM_1G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_1G:
+				speed = RTE_ETH_SPEED_NUM_1G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_2_5G:
-				speed = ETH_SPEED_NUM_2_5G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_2_5G:
+				speed = RTE_ETH_SPEED_NUM_2_5G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10G:
-				speed = ETH_SPEED_NUM_10G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10G:
+				speed = RTE_ETH_SPEED_NUM_10G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			default:
-				speed = ETH_SPEED_NUM_NONE;
-				duplex = ETH_LINK_FULL_DUPLEX;
+				speed = RTE_ETH_SPEED_NUM_NONE;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			}
 			/* Set link speed */
@@ -535,30 +535,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G
-					| ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G
+					| RTE_ETH_LINK_SPEED_10G;
 	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, fif->mac_type);
@@ -591,12 +591,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 
 	/* Update Rx offload info */
@@ -623,14 +623,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -664,7 +664,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 			ret = dpaa_get_link_status(__fif->node_name, link);
 			if (ret)
 				return ret;
-			if (link->link_status == ETH_LINK_DOWN &&
+			if (link->link_status == RTE_ETH_LINK_DOWN &&
 			    wait_to_complete)
 				rte_delay_ms(CHECK_INTERVAL);
 			else
@@ -675,15 +675,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	}
 
 	if (ioctl_version < 2) {
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
-		link->link_autoneg = ETH_LINK_AUTONEG;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 		if (fif->mac_type == fman_mac_1g)
-			link->link_speed = ETH_SPEED_NUM_1G;
+			link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		else if (fif->mac_type == fman_mac_2_5g)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else if (fif->mac_type == fman_mac_10g)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
 			DPAA_PMD_ERR("invalid link_speed: %s, %d",
 				     dpaa_intf->name, fif->mac_type);
@@ -962,7 +962,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (max_rx_pktlen <= buffsz) {
 		;
 	} else if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SCATTER) {
+			RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
 			DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
 				"MaxSGlist %d",
@@ -1268,7 +1268,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
 	else
 		return dpaa_eth_dev_stop(dev);
 	return 0;
@@ -1284,7 +1284,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
 	else
 		dpaa_eth_dev_start(dev);
 	return 0;
@@ -1314,10 +1314,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == RTE_FC_NONE) {
+	if (fc_conf->mode == RTE_ETH_FC_NONE) {
 		return 0;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-		 fc_conf->mode == RTE_FC_FULL) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+		 fc_conf->mode == RTE_ETH_FC_FULL) {
 		fman_if_set_fc_threshold(dev->process_private,
 					 fc_conf->high_water,
 					 fc_conf->low_water,
@@ -1361,11 +1361,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 	}
 	ret = fman_if_get_fc_threshold(dev->process_private);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time =
 			fman_if_get_fc_quanta(dev->process_private);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1626,10 +1626,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
 	fc_conf = dpaa_intf->fc_conf;
 	ret = fman_if_get_fc_threshold(fman_intf);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
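
[Editor note on the link_speeds switch earlier in this file: RTE_ETH_LINK_SPEED_FIXED is a flag OR-ed into link_speeds, so it has to be masked off (hence the ~) before the remaining bits are compared against individual speed flags. A minimal sketch of the decode; the helper is hypothetical and only two speeds are shown:]

#include <stdint.h>
#include <rte_ethdev.h>

static uint32_t
fixed_speed_mbps(uint32_t link_speeds)
{
	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
	case RTE_ETH_LINK_SPEED_1G:
		return RTE_ETH_SPEED_NUM_1G;
	case RTE_ETH_LINK_SPEED_10G:
		return RTE_ETH_SPEED_NUM_10G;
	default:
		return RTE_ETH_SPEED_NUM_NONE;
	}
}
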
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b5728e09c29f..c868e9d5bd9b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -74,11 +74,11 @@
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
 #define DPAA_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP)
 
 #define DPAA_TX_CKSUM_OFFLOAD_MASK (             \
 		PKT_TX_IP_CKSUM |                \
diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
index c5b5ec869519..1ccd03602790 100644
--- a/drivers/net/dpaa/dpaa_flow.c
+++ b/drivers/net/dpaa/dpaa_flow.c
@@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1U << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_ETH;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
 
 				if (ipv4_configured)
 					break;
@@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV4;
 				break;
 
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (ipv6_configured)
 					break;
@@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV6;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
 
 				if (tcp_configured)
 					break;
@@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_TCP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (udp_configured)
 					break;
@@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_UDP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
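
[Editor note: the set_dist_units() loop above walks req_dist_set one bit at a time. The pattern in isolation, as a hypothetical helper:]

#include <stdint.h>

static void
walk_rss_flags(uint64_t req_dist_set)
{
	uint64_t dist_field;
	int loop;

	for (loop = 0; req_dist_set != 0; req_dist_set >>= 1, loop++) {
		if ((req_dist_set & 1) == 0)
			continue;
		dist_field = 1ULL << loop;
		/* dispatch on dist_field, e.g. RTE_ETH_RSS_IPV4 ... */
		(void)dist_field;
	}
}
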
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 08f49af7685d..3170694841df 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -220,9 +220,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1ULL << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
-			case ETH_RSS_ETH:
-
+			case RTE_ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_ETH:
 				if (l2_configured)
 					break;
 				l2_configured = 1;
@@ -238,7 +237,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_PPPOE:
+			case RTE_ETH_RSS_PPPOE:
 				if (pppoe_configured)
 					break;
 				kg_cfg->extracts[i].extract.from_hdr.prot =
@@ -252,7 +251,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_ESP:
+			case RTE_ETH_RSS_ESP:
 				if (esp_configured)
 					break;
 				esp_configured = 1;
@@ -268,7 +267,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_AH:
+			case RTE_ETH_RSS_AH:
 				if (ah_configured)
 					break;
 				ah_configured = 1;
@@ -284,8 +283,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_C_VLAN:
-			case ETH_RSS_S_VLAN:
+			case RTE_ETH_RSS_C_VLAN:
+			case RTE_ETH_RSS_S_VLAN:
 				if (vlan_configured)
 					break;
 				vlan_configured = 1;
@@ -301,7 +300,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_MPLS:
+			case RTE_ETH_RSS_MPLS:
 
 				if (mpls_configured)
 					break;
@@ -338,13 +337,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (l3_configured)
 					break;
@@ -382,12 +381,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_TCP_EX:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (l4_configured)
 					break;
@@ -414,8 +413,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a0270e78520e..59e728577f53 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -38,33 +38,33 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_CHECKSUM |
-		DEV_RX_OFFLOAD_SCTP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_TIMESTAMP;
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_RSS_HASH |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -142,7 +142,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN Filter not avaialble */
 		if (!priv->max_vlan_filters) {
 			DPAA2_PMD_INFO("VLAN filter not available");
@@ -150,7 +150,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 
 		if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
 		else
@@ -251,13 +251,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 					dev_rx_offloads_nodis;
 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
 					dev_tx_offloads_nodis;
-	dev_info->speed_capa = ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -270,10 +270,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
 	if (dpaa2_svr_family == SVR_LX2160A) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return 0;
@@ -291,15 +291,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
 	};
 
 	/* Update Rx offload info */
@@ -326,15 +326,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -573,7 +573,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		return -1;
 	}
 
-	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
 			ret = dpaa2_setup_flow_dist(dev,
 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -587,12 +587,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rx_l3_csum_offload = true;
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
 		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -610,7 +610,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
 	{
 		ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -623,12 +623,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		dpaa2_enable_ts[dev->data->port_id] = true;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		tx_l3_csum_offload = true;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
 		tx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -660,8 +660,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
 	dpaa2_tm_init(dev);
 
@@ -1856,7 +1856,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
 			return -1;
 		}
-		if (state.up == ETH_LINK_DOWN &&
+		if (state.up == RTE_ETH_LINK_DOWN &&
 		    wait_to_complete)
 			rte_delay_ms(CHECK_INTERVAL);
 		else
@@ -1868,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 	link.link_speed = state.rate;
 
 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
@@ -2031,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	No TX side flow control (send Pause frame disabled)
 		 */
 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_RX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	} else {
 		/* DPNI_LINK_OPT_PAUSE not set
 		 *  if ASYM_PAUSE set,
@@ -2043,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	Flow control disabled
 		 */
 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return ret;
@@ -2089,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	/* update cfg with fc_conf */
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		/* Full flow control;
 		 * OPT_PAUSE set, ASYM_PAUSE not set
 		 */
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		/* Enable RX flow control
 		 * OPT_PAUSE not set;
 		 * ASYM_PAUSE set;
@@ -2104,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		/* Enable TX Flow control
 		 * OPT_PAUSE set
 		 * ASYM_PAUSE set
@@ -2112,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		/* Disable Flow control
 		 * OPT_PAUSE not set
 		 * ASYM_PAUSE not set
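
[Editor note: for context on the VLAN masks renamed above, a sketch of the application-side toggle that ends up in vlan_offload_set handlers like dpaa2's. This uses the public ethdev API; the helper name is illustrative:]

#include <rte_ethdev.h>

static int
enable_vlan_filter(uint16_t port_id)
{
	int flags = rte_eth_dev_get_vlan_offload(port_id);

	if (flags < 0)
		return flags;	/* negative errno from ethdev */
	flags |= RTE_ETH_VLAN_FILTER_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, flags);
}
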
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index fdc62ec30d22..c5e9267bf04d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -65,17 +65,17 @@
 #define DPAA2_TX_CONF_ENABLE	0x08
 
 #define DPAA2_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP | \
-	ETH_RSS_MPLS | \
-	ETH_RSS_C_VLAN | \
-	ETH_RSS_S_VLAN | \
-	ETH_RSS_ESP | \
-	ETH_RSS_AH | \
-	ETH_RSS_PPPOE)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP | \
+	RTE_ETH_RSS_MPLS | \
+	RTE_ETH_RSS_C_VLAN | \
+	RTE_ETH_RSS_S_VLAN | \
+	RTE_ETH_RSS_ESP | \
+	RTE_ETH_RSS_AH | \
+	RTE_ETH_RSS_PPPOE)
 
 /* LX2 FRC Parsed values (Little Endian) */
 #define DPAA2_PKT_TYPE_ETHER		0x0060
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index f40369e2c3f9..7c77243b5d1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -773,7 +773,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP)
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
@@ -987,7 +987,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 							eth_data->port_id);
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP) {
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			rte_vlan_strip(bufs[num_rx]);
 		}
 
@@ -1230,7 +1230,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					if (unlikely(((*bufs)->ol_flags
 						& PKT_TX_VLAN_PKT) ||
 						(eth_data->dev_conf.txmode.offloads
-						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -1273,7 +1273,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
 				(eth_data->dev_conf.txmode.offloads
-				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 				int ret = rte_vlan_insert(bufs);
 				if (ret)
 					goto send_n_return;
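
[Editor note: the Tx hunks above fall back to software tag insertion. The pattern in isolation, as a hypothetical helper; rte_vlan_insert() may reallocate the mbuf and expects mbuf->vlan_tci to be set:]

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

static int
maybe_sw_vlan_insert(struct rte_mbuf **m, uint64_t tx_offloads)
{
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		return rte_vlan_insert(m);
	return 0;
}
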
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 93bee734ae5d..031c92a66fa0 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -81,15 +81,15 @@
 #define E1000_FTQF_QUEUE_ENABLE          0x00000100
 
 #define IGB_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 /*
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 73152dec6ed1..9da477e59def 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -597,8 +597,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	e1000_clear_hw_cntrs_base_generic(hw);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_em_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -611,39 +611,39 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -1102,9 +1102,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
@@ -1162,17 +1162,17 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -1424,15 +1424,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1601,7 +1601,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
-			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
@@ -1683,13 +1683,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 344149c19147..648b04154c5b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -93,7 +93,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -173,7 +173,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1171,11 +1171,11 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	RTE_SET_USED(dev);
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	return tx_offload_capa;
 }
@@ -1369,13 +1369,13 @@ em_get_rx_port_offloads_capa(void)
 	uint64_t rx_offload_capa;
 
 	rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP  |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM   |
-		DEV_RX_OFFLOAD_KEEP_CRC    |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return rx_offload_capa;
 }
@@ -1469,7 +1469,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1788,7 +1788,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -1831,7 +1831,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1844,7 +1844,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1870,7 +1870,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
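
[Editor note: the KEEP_CRC hunks above adjust both the RCTL register and the queue's crc_len. The length side of that pattern in isolation, as a hypothetical helper:]

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

static uint8_t
rx_crc_len(uint64_t rx_offloads)
{
	/* With KEEP_CRC the 4-byte FCS stays in the mbuf data. */
	return (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
		RTE_ETHER_CRC_LEN : 0;
}
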
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index dbe811a1ad2f..ae3bc4a9c201 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1073,21 +1073,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
-	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
-	    tx_mq_mode == ETH_MQ_TX_DCB ||
-	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* Check multi-queue mode.
-		 * To no break software we accept ETH_MQ_RX_NONE as this might
+		 * To not break software we accept RTE_ETH_MQ_RX_NONE as this might
 		 * be used to turn off VLAN filter.
 		 */
 
-		if (rx_mq_mode == ETH_MQ_RX_NONE ||
-		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+		if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+		    rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 		} else {
 			/* Only support one queue on VFs.
@@ -1099,12 +1099,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 		/* TX mode is not used here, so mode might be ignored.*/
-		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(WARNING, "SRIOV is active,"
 					" TX mode %d is not supported. "
 					" Driver will behave as %d mode.",
-					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+					tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
 		}
 
 		/* check valid queue number */
@@ -1117,17 +1117,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 		/* To no break software that set invalid mode, only display
 		 * warning if invalid mode is used.
 		 */
-		if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
-		    rx_mq_mode != ETH_MQ_RX_RSS) {
+		if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 			/* RSS together with VMDq not supported*/
 			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				     rx_mq_mode);
 			return -EINVAL;
 		}
 
-		if (tx_mq_mode != ETH_MQ_TX_NONE &&
-		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+		    tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
 					" Due to txmode is meaningless in this"
 					" driver, just ignore.",
@@ -1146,8 +1146,8 @@ eth_igb_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multipe queue mode checking */
 	ret  = igb_check_mq_mode(dev);
@@ -1287,8 +1287,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/*
 	 * VLAN Offload Settings
 	 */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_igb_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
@@ -1296,7 +1296,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable VLAN filter since VMDq always use VLAN filter */
 		igb_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1310,39 +1310,39 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -2185,21 +2185,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_82576:
 		dev_info->max_rx_queues = 16;
 		dev_info->max_tx_queues = 16;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 16;
 		break;
 
 	case e1000_82580:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
 	case e1000_i350:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
@@ -2225,7 +2225,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		return -EINVAL;
 	}
 	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2251,9 +2251,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2296,12 +2296,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	switch (hw->mac.type) {
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
@@ -2402,17 +2402,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else if (!link_check) {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2588,7 +2588,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
 	/* only outer TPID of double VLAN can be configured*/
-	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg = E1000_READ_REG(hw, E1000_VET);
 		reg = (reg & (~E1000_VET_VET_EXT)) |
 			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
@@ -2703,22 +2703,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -2870,7 +2870,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
 				     " Port %d: Link Up - speed %u Mbps - %s",
 				     dev->data->port_id,
 				     (unsigned)link.link_speed,
-				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				     "full-duplex" : "half-duplex");
 		} else {
 			PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3024,13 +3024,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3099,18 +3099,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 * on configuration
 		 */
 		switch (fc_conf->mode) {
-		case RTE_FC_NONE:
+		case RTE_ETH_FC_NONE:
 			ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_RX_PAUSE:
+		case RTE_ETH_FC_RX_PAUSE:
 			ctrl |= E1000_CTRL_RFCE;
 			ctrl &= ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_TX_PAUSE:
+		case RTE_ETH_FC_TX_PAUSE:
 			ctrl |= E1000_CTRL_TFCE;
 			ctrl &= ~E1000_CTRL_RFCE;
 			break;
-		case RTE_FC_FULL:
+		case RTE_ETH_FC_FULL:
 			ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
 			break;
 		default:
@@ -3258,22 +3258,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -3571,16 +3571,16 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
@@ -3612,16 +3612,16 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
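
The idx/shift arithmetic in the two RETA hunks above maps a flat
indirection-table index onto rte_eth_rss_reta_entry64 slots of
RTE_ETH_RETA_GROUP_SIZE (64) entries each. A minimal application-side
sketch of the same pattern with the renamed macros (the helper name and
the even queue spreading are illustrative, not part of this patch):

#include <string.h>
#include <rte_ethdev.h>

/* Spread a 128-entry RETA evenly across nb_rxq Rx queues. */
static int
example_setup_reta(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64 reta_conf[
			RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_rxq;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			RTE_ETH_RSS_RETA_SIZE_128);
}
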
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 2ce74dd5a9a5..fe355ef6b3b5 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -88,7 +88,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
-	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+	RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a1d5eecc14a1..bcce2fc726d8 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -111,7 +111,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -186,7 +186,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1459,13 +1459,13 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	RTE_SET_USED(dev);
-	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
-			  DEV_TX_OFFLOAD_TCP_TSO     |
-			  DEV_TX_OFFLOAD_MULTI_SEGS;
+	tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return tx_offload_capa;
 }
@@ -1640,19 +1640,19 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
-			  DEV_RX_OFFLOAD_VLAN_FILTER |
-			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_RX_OFFLOAD_UDP_CKSUM   |
-			  DEV_RX_OFFLOAD_TCP_CKSUM   |
-			  DEV_RX_OFFLOAD_KEEP_CRC    |
-			  DEV_RX_OFFLOAD_SCATTER     |
-			  DEV_RX_OFFLOAD_RSS_HASH;
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+			  RTE_ETH_RX_OFFLOAD_SCATTER     |
+			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == e1000_i350 ||
 	    hw->mac.type == e1000_i210 ||
 	    hw->mac.type == e1000_i211)
-		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	return rx_offload_capa;
 }
@@ -1733,7 +1733,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1950,23 +1950,23 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
@@ -2032,23 +2032,23 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -2170,15 +2170,15 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
 			E1000_VMOLR_MPME);
 
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 			vmolr |= E1000_VMOLR_AUPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 			vmolr |= E1000_VMOLR_ROMPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 			vmolr |= E1000_VMOLR_ROPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 			vmolr |= E1000_VMOLR_BAM;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 			vmolr |= E1000_VMOLR_MPME;
 
 		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
@@ -2214,9 +2214,9 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	/* VLVF: set up filters for vlan tags as configured */
 	for (i = 0; i < cfg->nb_pool_maps; i++) {
 		/* set vlan id in VF register and set the valid bit */
-		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
-                        (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
-			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+			(cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
+			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
 			E1000_VLVF_POOLSEL_MASK)));
 	}
 
@@ -2268,7 +2268,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t mrqc;
 
-	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+	if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
 		/*
 		 * SRIOV active scheme
 		 * FIXME if support RSS together with VMDq & SRIOV
@@ -2282,14 +2282,14 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-			case ETH_MQ_RX_RSS:
+			case RTE_ETH_MQ_RX_RSS:
 				igb_rss_configure(dev);
 				break;
-			case ETH_MQ_RX_VMDQ_ONLY:
+			case RTE_ETH_MQ_RX_VMDQ_ONLY:
 				/*Configure general VMDQ only RX parameters*/
 				igb_vmdq_rx_hw_configure(dev);
 				break;
-			case ETH_MQ_RX_NONE:
+			case RTE_ETH_MQ_RX_NONE:
 				/* if mq_mode is none, disable rss mode.*/
 			default:
 				igb_rss_disable(dev);
@@ -2338,7 +2338,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Set maximum packet length by default, and might be updated
 		 * together with enabling/disabling dual VLAN.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			max_len += VLAN_TAG_SIZE;
 
 		E1000_WRITE_REG(hw, E1000_RLPML, max_len);
@@ -2374,7 +2374,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -2444,7 +2444,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2488,16 +2488,16 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
 	if (rxmode->offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		rxcsum |= E1000_RXCSUM_TUOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_CRCOFL;
@@ -2505,7 +2505,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
 		/* clear STRCRC bit in all queues */
@@ -2545,7 +2545,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/* Make sure VLAN Filters are off. */
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
 		rctl &= ~E1000_RCTL_VFE;
 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
@@ -2743,7 +2743,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
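
The eth_igb_configure()/igbvf_dev_configure() hunks above keep forcing
the RSS hash offload on whenever the RSS multi-queue flag is requested,
now spelled with the new names. A minimal port configuration using the
renamed constants might look as follows (the offload and hash selections
are illustrative):

#include <rte_ethdev.h>

static const struct rte_eth_conf example_port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL, /* use the PMD default key */
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};
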
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 3fde099ab42c..57b53bfd6c48 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -116,10 +116,10 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-			DEV_TX_OFFLOAD_UDP_CKSUM |\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
 		       PKT_TX_IP_CKSUM |\
 		       PKT_TX_TCP_SEG)
@@ -310,7 +310,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
 		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
@@ -318,7 +318,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L3 checksum is needed */
 		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -335,12 +335,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L4 checksum is needed */
 		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_UDP_CKSUM) &&
-				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else {
@@ -621,9 +621,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
-	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_NONE;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return 0;
 }
@@ -901,7 +901,7 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
@@ -1840,9 +1840,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
@@ -1893,35 +1893,35 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
-			ETH_LINK_SPEED_1G   |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_5G   |
-			ETH_LINK_SPEED_10G  |
-			ETH_LINK_SPEED_25G  |
-			ETH_LINK_SPEED_40G  |
-			ETH_LINK_SPEED_50G  |
-			ETH_LINK_SPEED_100G;
+			RTE_ETH_LINK_SPEED_1G   |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_5G   |
+			RTE_ETH_LINK_SPEED_10G  |
+			RTE_ETH_LINK_SPEED_25G  |
+			RTE_ETH_LINK_SPEED_40G  |
+			RTE_ETH_LINK_SPEED_50G  |
+			RTE_ETH_LINK_SPEED_100G;
 
 	/* Set Tx & Rx features available for device */
 	if (adapter->offloads.tso4_supported)
-		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
+		tx_feat	|= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (adapter->offloads.tx_csum_supported)
-		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+		tx_feat |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (adapter->offloads.rx_csum_supported)
-		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM  |
-			DEV_RX_OFFLOAD_TCP_CKSUM;
+		rx_feat |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
-	tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	tx_feat |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
 	if (adapter->offloads.rss_hash_supported)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
@@ -2088,7 +2088,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 06ac8b06b5cb..3b1844e50982 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -54,8 +54,8 @@
 
 #define ENA_HASH_KEY_SIZE		40
 
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define ENA_IO_TXQ_IDX(q)		(2 * (q))
 #define ENA_IO_RXQ_IDX(q)		(2 * (q) + 1)
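
ENA_ALL_RSS_HF above collects the four RTE_ETH_RSS_* packet types the ENA
device hashes on. Applications do not need driver headers for this: the
same information is exported through rte_eth_dev_info. A small sketch,
assuming only the renamed flow_type_rss_offloads bits (the helper name is
illustrative):

#include <rte_ethdev.h>

/* Trim a requested RSS hash mask to what the port actually supports. */
static uint64_t
example_supported_rss_hf(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	return requested & dev_info.flow_type_rss_offloads;
}
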
diff --git a/drivers/net/ena/ena_rss.c b/drivers/net/ena/ena_rss.c
index 152098410fa2..be4007e3f3fe 100644
--- a/drivers/net/ena/ena_rss.c
+++ b/drivers/net/ena/ena_rss.c
@@ -76,7 +76,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -93,8 +93,8 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 		/* Each reta_conf is for 64 entries.
 		 * To support 128 we use 2 conf of 64.
 		 */
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
@@ -139,7 +139,7 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -154,8 +154,8 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
-		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		reta_conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
 			reta_conf[reta_conf_idx].reta[reta_idx] =
 				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
@@ -199,34 +199,34 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Convert proto to ETH flag */
 	switch (proto) {
 	case ENA_ADMIN_RSS_TCP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		break;
 	case ENA_ADMIN_RSS_TCP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 		break;
 	case ENA_ADMIN_RSS_IP4:
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 		break;
 	case ENA_ADMIN_RSS_IP6:
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 		break;
 	case ENA_ADMIN_RSS_IP4_FRAG:
-		rss_hf |= ETH_RSS_FRAG_IPV4;
+		rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
 		break;
 	case ENA_ADMIN_RSS_NOT_IP:
-		rss_hf |= ETH_RSS_L2_PAYLOAD;
+		rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
 		break;
 	case ENA_ADMIN_RSS_TCP6_EX:
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 		break;
 	case ENA_ADMIN_RSS_IP6_EX:
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 		break;
 	default:
 		break;
@@ -235,10 +235,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L3. */
 	switch (fields & ENA_HF_RSS_ALL_L3) {
 	case ENA_ADMIN_RSS_L3_SA:
-		rss_hf |= ETH_RSS_L3_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L3_DA:
-		rss_hf |= ETH_RSS_L3_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
 		break;
 	default:
 		break;
@@ -247,10 +247,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L4. */
 	switch (fields & ENA_HF_RSS_ALL_L4) {
 	case ENA_ADMIN_RSS_L4_SP:
-		rss_hf |= ETH_RSS_L4_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L4_DP:
-		rss_hf |= ETH_RSS_L4_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
 		break;
 	default:
 		break;
@@ -268,11 +268,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
 	/* Determine which fields of L3 should be used. */
-	switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
-	case ETH_RSS_L3_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+	case RTE_ETH_RSS_L3_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_DA;
 		break;
-	case ETH_RSS_L3_SRC_ONLY:
+	case RTE_ETH_RSS_L3_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_SA;
 		break;
 	default:
@@ -284,11 +284,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	}
 
 	/* Determine which fields of L4 should be used. */
-	switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
-	case ETH_RSS_L4_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+	case RTE_ETH_RSS_L4_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_DP;
 		break;
-	case ETH_RSS_L4_SRC_ONLY:
+	case RTE_ETH_RSS_L4_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_SP;
 		break;
 	default:
@@ -334,43 +334,43 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
 	int rc, i;
 
 	/* Turn on appropriate fields for each requested packet type */
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
 
-	if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+	if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
 		selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
 
@@ -541,7 +541,7 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint16_t admin_hf;
 	static bool warn_once;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
 		return -ENOTSUP;
 	}
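
On the query side, ena_rss_hash_conf_get() above translates the device
fields back into RTE_ETH_RSS_* bits, so applications can test them
directly. A sketch of such a check (the printed strings and helper name
are illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

static void
example_print_rss_hash(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };

	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) != 0)
		return;
	if (rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
		printf("port %u hashes on IPv4/TCP\n", port_id);
	if (rss_conf.rss_hf & RTE_ETH_RSS_L3_SRC_ONLY)
		printf("port %u hashes on the L3 source address only\n",
				port_id);
}
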
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 1b567f01eae0..7cdb8ce463ed 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -100,27 +100,27 @@ enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
 
 	if (status & ENETC_LINK_MODE)
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 
 	if (status & ENETC_LINK_STATUS)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	switch (status & ENETC_LINK_SPEED_MASK) {
 	case ENETC_LINK_SPEED_1G:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case ENETC_LINK_SPEED_100M:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	default:
 	case ENETC_LINK_SPEED_10M:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -207,10 +207,10 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	dev_info->max_tx_queues = MAX_TX_RINGS;
 	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
 	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		 DEV_RX_OFFLOAD_UDP_CKSUM |
-		 DEV_RX_OFFLOAD_TCP_CKSUM |
-		 DEV_RX_OFFLOAD_KEEP_CRC);
+		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_KEEP_CRC);
 
 	return 0;
 }
@@ -463,7 +463,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
 			       RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
-	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				     RTE_ETHER_CRC_LEN : 0);
 
 	return 0;
@@ -705,7 +705,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
 	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		int config;
 
 		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
@@ -713,10 +713,10 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		checksum &= ~L3_CKSUM;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		checksum &= ~L4_CKSUM;
 
 	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
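
enetc_link_update() above fills struct rte_eth_link with the renamed
RTE_ETH_LINK_* constants; applications read the same constants back
through the link API. A minimal sketch (the helper name is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

static void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u up, %u Mbps, %s-duplex\n", port_id,
				link.link_speed,
				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full" : "half");
	else
		printf("port %u down\n", port_id);
}
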
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 47bfdac2cfdd..d5493c98345d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -178,7 +178,7 @@ struct enic {
 	 */
 	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
 	uint8_t rss_enable;
-	uint64_t rss_hf; /* ETH_RSS flags */
+	uint64_t rss_hf; /* RTE_ETH_RSS flags */
 	union vnic_rss_key rss_key;
 	union vnic_rss_cpu rss_cpu;
 
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 8df7332bc5e0..c8bdaf1a8e79 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,30 +38,30 @@ static const struct vic_speed_capa {
 	uint16_t sub_devid;
 	uint32_t capa;
 } vic_speed_capa_map[] = {
-	{ 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-	{ 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-	{ 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-	{ 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-	{ 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-	{ 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-	{ 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-	{ 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-	{ 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-	{ 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-	{ 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-	{ 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-	{ 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-	{ 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-	{ 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-	{ 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-	{ 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-	{ 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-	{ 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-	{ 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-	{ 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-	{ 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
 	{ 0, 0 }, /* End marker */
 };
 
@@ -297,8 +297,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	ENICPMD_FUNC_TRACE();
 
 	offloads = eth_dev->data->dev_conf.rxmode.offloads;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			enic->ig_vlan_strip_en = 1;
 		else
 			enic->ig_vlan_strip_en = 0;
@@ -323,17 +323,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	enic->mc_count = 0;
 	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-				  DEV_RX_OFFLOAD_CHECKSUM);
+				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* All vlan offload masks to apply the current settings */
-	mask = ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = enicpmd_vlan_offload_set(eth_dev, mask);
 	if (ret) {
 		dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -435,14 +435,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
 	}
 	/* 1300 and later models are at least 40G */
 	if (id >= 0x0100)
-		return ETH_LINK_SPEED_40G;
+		return RTE_ETH_LINK_SPEED_40G;
 	/* VFs have subsystem id 0, check device id */
 	if (id == 0) {
 		/* Newer VF implies at least 40G model */
 		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-			return ETH_LINK_SPEED_40G;
+			return RTE_ETH_LINK_SPEED_40G;
 	}
-	return ETH_LINK_SPEED_10G;
+	return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -774,8 +774,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
 				enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -806,8 +806,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
 	 */
 	rss_cpu = enic->rss_cpu;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			rss_cpu.cpu[i / 4].b[i % 4] =
 				enic_rte_rq_idx_to_sop_idx(
@@ -883,7 +883,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 	 */
 	conf->offloads = enic->rx_offload_capa;
 	if (!enic->ig_vlan_strip_en)
-		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -969,8 +969,8 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
 				   struct rte_eth_udp_tunnel *tnl)
 {
-	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
-	    tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
 		return -ENOTSUP;
 	if (!enic->overlay_offload) {
 		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
@@ -1010,7 +1010,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
@@ -1039,7 +1039,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
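
The enic tunnel hunks above check the renamed RTE_ETH_TUNNEL_TYPE_*
values in the UDP tunnel callbacks. From the application side,
registering a tunnel port with the new enum might look like this sketch
(4789 is the IANA VXLAN port, used here for illustration):

#include <rte_ethdev.h>

static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tnl = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
}
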
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index dfc7f5d1f94f..21b1fffb14f0 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
 
 	memset(&link, 0, sizeof(link));
 	link.link_status = enic_get_link_status(enic);
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = vnic_dev_port_speed(enic->vdev);
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
 	}
 
 	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* vnic notification of link status has already been turned on in
 	 * enic_dev_init() which is called during probe time.  Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
 	 * and vlan insertion are supported.
 	 */
 	simple_tx_offloads = enic->tx_offload_capa &
-		(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_VLAN_INSERT |
-		 DEV_TX_OFFLOAD_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_UDP_CKSUM |
-		 DEV_TX_OFFLOAD_TCP_CKSUM);
+		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if ((eth_dev->data->dev_conf.txmode.offloads &
 	     ~simple_tx_offloads) == 0) {
 		ENICPMD_LOG(DEBUG, " use the simple tx handler");
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
 
 	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_SCATTER) {
+	    RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((max pkt len)/mbuf_size) */
 		mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
@@ -1385,15 +1385,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 	rss_hash_type = 0;
 	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
 	if (enic->rq_count > 1 &&
-	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+	    (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
 	    rss_hf != 0) {
 		rss_enable = 1;
-		if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			      ETH_RSS_NONFRAG_IPV4_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
 			if (enic->udp_rss_weak) {
 				/*
@@ -1404,12 +1404,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
 			}
 		}
-		if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
-			      ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+			      RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
 			if (enic->udp_rss_weak)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -1745,9 +1745,9 @@ enic_enable_overlay_offload(struct enic *enic)
 		return -EINVAL;
 	}
 	enic->tx_offload_capa |=
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		(enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
-		(enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+		(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
 	enic->tx_offload_mask |=
 		PKT_TX_OUTER_IPV6 |
 		PKT_TX_OUTER_IPV4 |
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index c5777772a09e..918a9e170ff6 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -147,31 +147,31 @@ int enic_get_vnic_config(struct enic *enic)
 		 * IPV4 hash type handles both non-frag and frag packet types.
 		 * TCP/UDP is controlled via a separate flag below.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
-			ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+			RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (ENIC_SETTING(enic, RSSHASH_IPV6))
 		/*
 		 * The VIC adapter can perform RSS on IPv6 packets with and
 		 * without extension headers. An IPv6 "fragment" is an IPv6
 		 * packet with the fragment extension header.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
-			ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+			RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
-			ETH_RSS_IPV6_TCP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_IPV6_TCP_EX;
 	if (enic->udp_rss_weak)
 		enic->flow_type_rss_offloads |=
-			ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 
 	/* Zero offloads if RSS is not enabled */
 	if (!ENIC_SETTING(enic, RSS))
@@ -201,19 +201,19 @@ int enic_get_vnic_config(struct enic *enic)
 	enic->tx_queue_offload_capa = 0;
 	enic->tx_offload_capa =
 		enic->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	enic->rx_offload_capa =
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	enic->tx_offload_mask =
 		PKT_TX_IPV6 |
 		PKT_TX_IPV4 |
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index b87c036e6014..82d595b1d1a0 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -17,10 +17,10 @@
 
 const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
 static const struct rte_eth_link eth_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_UP,
-	.link_autoneg = ETH_LINK_AUTONEG,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_UP,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG,
 };
 
 static int
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 602c04033c18..5f4810051dac 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -326,7 +326,7 @@ int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
 	int qid;
 	struct rte_eth_dev *fsdev;
 	struct rxq **rxq;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 				&ETH(sdev)->data->dev_conf.intr_conf;
 
 	fsdev = fs_dev(sdev);
@@ -519,7 +519,7 @@ int
 failsafe_rx_intr_install(struct rte_eth_dev *dev)
 {
 	struct fs_priv *priv = PRIV(dev);
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 			&priv->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
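
Note: the renamed rte_eth_intr_conf checked above is supplied by the application
at configure time; a hedged sketch (field values illustrative):

	struct rte_eth_conf dev_conf = {
		.intr_conf = {
			.rxq = 1, /* per-Rx-queue interrupts, as tested above */
			.lsc = 1, /* link status change interrupt */
		},
	};
	/* Then: rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &dev_conf); */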
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 29de39910c6e..a3a8a1c82e3a 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1172,51 +1172,51 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
 	 * configuring a sub-device.
 	 */
 	infos->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->rx_queue_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	infos->flow_type_rss_offloads =
-		ETH_RSS_IP |
-		ETH_RSS_UDP |
-		ETH_RSS_TCP;
+		RTE_ETH_RSS_IP |
+		RTE_ETH_RSS_UDP |
+		RTE_ETH_RSS_TCP;
 	infos->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 17c73c4dc5ae..b7522a47a80b 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -177,7 +177,7 @@ struct fm10k_rx_queue {
 	uint8_t drop_en;
 	uint8_t rx_deferred_start; /* don't start this queue in dev start. */
 	uint16_t rx_ftag_en; /* indicates FTAG RX supported */
-	uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /*
@@ -209,7 +209,7 @@ struct fm10k_tx_queue {
 	uint16_t next_rs; /* Next pos to set RS flag */
 	uint16_t next_dd; /* Next pos to check DD flag */
 	volatile uint32_t *tail_ptr;
-	uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
 	uint16_t nb_desc;
 	uint16_t port_id;
 	uint8_t tx_deferred_start; /** don't start this queue in dev start. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 66f4a5c6df2c..d256334bfde9 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -413,12 +413,12 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
 
 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 
-	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		return 0;
 
 	if (hw->mac.type == fm10k_mac_vf) {
@@ -449,8 +449,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
@@ -510,7 +510,7 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};
 
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
 		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
@@ -547,15 +547,15 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 	 */
 	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	if (mrqc == 0) {
 		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
@@ -602,7 +602,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	if (hw->mac.type != fm10k_mac_pf)
 		return;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		nb_queue_pools = vmdq_conf->nb_queue_pools;
 
 	/* no pool number change, no need to update logic port and VLAN/MAC */
@@ -759,7 +759,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+			rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1145,7 +1145,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Update default vlan when not in VMDQ mode */
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
 	fm10k_link_update(dev, 0);
@@ -1222,11 +1222,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();
 
-	dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_speed  = RTE_ETH_SPEED_NUM_50G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	dev->data->dev_link.link_status =
-		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+		dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return 0;
 }
@@ -1378,7 +1378,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_vfs            = pdev->max_vfs;
 	dev_info->vmdq_pool_base     = 0;
 	dev_info->vmdq_queue_base    = 0;
-	dev_info->max_vmdq_pools     = ETH_32_POOLS;
+	dev_info->max_vmdq_pools     = RTE_ETH_32_POOLS;
 	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
 	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
@@ -1389,15 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
-	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-					ETH_RSS_IPV6 |
-					ETH_RSS_IPV6_EX |
-					ETH_RSS_NONFRAG_IPV4_TCP |
-					ETH_RSS_NONFRAG_IPV6_TCP |
-					ETH_RSS_IPV6_TCP_EX |
-					ETH_RSS_NONFRAG_IPV4_UDP |
-					ETH_RSS_NONFRAG_IPV6_UDP |
-					ETH_RSS_IPV6_UDP_EX;
+	dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+					RTE_ETH_RSS_IPV6 |
+					RTE_ETH_RSS_IPV6_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+					RTE_ETH_RSS_IPV6_TCP_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+					RTE_ETH_RSS_IPV6_UDP_EX;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1435,9 +1435,9 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+			RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1509,7 +1509,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		return -EINVAL;
 	}
 
-	if (vlan_id > ETH_VLAN_ID_MAX) {
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
 		return -EINVAL;
 	}
@@ -1767,20 +1767,20 @@ static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+	return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
 }
 
 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
-			   DEV_RX_OFFLOAD_VLAN_FILTER |
-			   DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			   DEV_RX_OFFLOAD_UDP_CKSUM   |
-			   DEV_RX_OFFLOAD_TCP_CKSUM   |
-			   DEV_RX_OFFLOAD_HEADER_SPLIT |
-			   DEV_RX_OFFLOAD_RSS_HASH);
+	return  (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH);
 }
 
 static int
@@ -1965,12 +1965,12 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_MULTI_SEGS  |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_TSO);
+	return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO);
 }
 
 static int
@@ -2111,8 +2111,8 @@ fm10k_reta_update(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2160,8 +2160,8 @@ fm10k_reta_query(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2198,15 +2198,15 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	/* If the mapping doesn't fit any supported, return */
 	if (mrqc == 0)
@@ -2243,15 +2243,15 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
 	hf = 0;
-	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV4)     ? RTE_ETH_RSS_IPV4              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6_EX           : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX       : 0;
 
 	rss_conf->rss_hf = hf;
 
@@ -2606,7 +2606,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 
 			/* first clear the internal SW recording structure */
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					false);
 
@@ -2622,7 +2622,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 					MAIN_VSI_POOL_NUMBER);
 
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					true);
 
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 83af01dc2da6..50973a662c67 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -208,11 +208,11 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 	/* without Rx ol_flags, no VP flag report */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 #endif
 
@@ -221,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
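
Note: the RTE_ETH_RETA_GROUP_SIZE arithmetic in fm10k_reta_update() and
fm10k_reta_query() above follows the generic indirection-table layout; a minimal
sketch of the same indexing from the application side (reta_size and nb_queues
are placeholders):

	static void
	fill_reta(struct rte_eth_rss_reta_entry64 *reta_conf,
		  uint16_t reta_size, uint16_t nb_queues)
	{
		uint16_t i;

		for (i = 0; i < reta_size; i++) {
			/* 64 entries per rte_eth_rss_reta_entry64 group */
			uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

			reta_conf[idx].mask |= 1ULL << shift;
			reta_conf[idx].reta[shift] = i % nb_queues;
		}
	}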
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c
index cb9cf6efa287..80f9eb5c3031 100644
--- a/drivers/net/hinic/base/hinic_pmd_hwdev.c
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -1320,28 +1320,28 @@ hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
 static int hinic_link_event_process(struct hinic_hwdev *hwdev,
 				    struct rte_eth_dev *eth_dev, u8 status)
 {
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 	struct nic_port_info port_info;
 	struct rte_eth_link link;
 	int rc = HINIC_OK;
 
 	if (!status) {
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
 		memset(&port_info, 0, sizeof(port_info));
 		rc = hinic_get_port_info(hwdev, &port_info);
 		if (rc) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
-			link.link_autoneg = ETH_LINK_FIXED;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+			link.link_autoneg = RTE_ETH_LINK_FIXED;
 		} else {
 			link.link_speed = port_speed[port_info.speed %
 						LINK_SPEED_MAX];
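
Note: the rte_eth_link fields filled above are what applications read back; an
illustrative query (port_id is a placeholder):

	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: %u Mbps\n", port_id, link.link_speed);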
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index c2374ebb6759..4cd5a85d5f8d 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -311,8 +311,8 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* mtu size is 256~9600 */
 	if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
@@ -338,7 +338,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 
 	/* init vlan offload */
 	err = hinic_vlan_offload_set(dev,
-				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+				RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
 		(void)hinic_config_mq_mode(dev, FALSE);
@@ -696,15 +696,15 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
 	} else {
 		*speed_capa = 0;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
-			*speed_capa |= ETH_LINK_SPEED_1G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_1G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
-			*speed_capa |= ETH_LINK_SPEED_10G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_10G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
-			*speed_capa |= ETH_LINK_SPEED_25G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_25G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
-			*speed_capa |= ETH_LINK_SPEED_40G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_40G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
-			*speed_capa |= ETH_LINK_SPEED_100G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	}
 }
 
@@ -732,24 +732,24 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 
 	hinic_get_speed_capa(dev, &info->speed_capa);
 	info->rx_queue_offload_capa = 0;
-	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_VLAN_FILTER |
-				DEV_RX_OFFLOAD_SCATTER |
-				DEV_RX_OFFLOAD_TCP_LRO |
-				DEV_RX_OFFLOAD_RSS_HASH;
+	info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				RTE_ETH_RX_OFFLOAD_SCATTER |
+				RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	info->tx_queue_offload_capa = 0;
-	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_UDP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_CKSUM |
-				DEV_TX_OFFLOAD_SCTP_CKSUM |
-				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_TCP_TSO |
-				DEV_TX_OFFLOAD_MULTI_SEGS;
+	info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
 	info->reta_size = HINIC_RSS_INDIR_SIZE;
@@ -846,20 +846,20 @@ static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
 	u8 port_link_status = 0;
 	struct nic_port_info port_link_info;
 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 
 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
 	if (rc)
 		return rc;
 
 	if (!port_link_status) {
-		link->link_status = ETH_LINK_DOWN;
+		link->link_status = RTE_ETH_LINK_DOWN;
 		link->link_speed = 0;
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
-		link->link_autoneg = ETH_LINK_FIXED;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_FIXED;
 		return HINIC_OK;
 	}
 
@@ -901,8 +901,8 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* Get link status information from hardware */
 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
 		if (rc != HINIC_OK) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Get link status failed");
 			goto out;
 		}
@@ -1650,8 +1650,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	int err;
 
 	/* Enable or disable VLAN filter */
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
 			TRUE : FALSE;
 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
@@ -1672,8 +1672,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Enable or disable VLAN stripping */
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
 			TRUE : FALSE;
 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
 		if (err) {
@@ -1859,13 +1859,13 @@ static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
 	fc_conf->autoneg = nic_pause.auto_neg;
 
 	if (nic_pause.tx_pause && nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (nic_pause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else if (nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1879,14 +1879,14 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	nic_pause.auto_neg = fc_conf->autoneg;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		nic_pause.tx_pause = true;
 	else
 		nic_pause.tx_pause = false;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		nic_pause.rx_pause = true;
 	else
 		nic_pause.rx_pause = false;
@@ -1930,7 +1930,7 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err = 0;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_OK;
 	}
@@ -1951,14 +1951,14 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 		}
 	}
 
-	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 
 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
 	if (err) {
@@ -1994,7 +1994,7 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_ERROR;
 	}
@@ -2015,15 +2015,15 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	rss_conf->rss_hf |=  rss_type.ipv4 ?
-		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
 	rss_conf->rss_hf |=  rss_type.ipv6 ?
-		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
-	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+	rss_conf->rss_hf |=  rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
 
 	return HINIC_OK;
 }
@@ -2053,7 +2053,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 	u16 i = 0;
 	u16 idx, shift;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
 		return HINIC_OK;
 
 	if (reta_size != NIC_RSS_INDIR_SIZE) {
@@ -2067,8 +2067,8 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 
 	/* update rss indir_tbl */
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
@@ -2133,8 +2133,8 @@ static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
 	}
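
Note: hinic_flow_ctrl_get() and hinic_flow_ctrl_set() above map the pause flags
to the renamed rte_eth_fc_mode values; the get-side mapping reduces to this
sketch:

	static enum rte_eth_fc_mode
	fc_mode_from_pause(bool tx_pause, bool rx_pause)
	{
		if (tx_pause && rx_pause)
			return RTE_ETH_FC_FULL;
		if (tx_pause)
			return RTE_ETH_FC_TX_PAUSE;
		if (rx_pause)
			return RTE_ETH_FC_RX_PAUSE;
		return RTE_ETH_FC_NONE;
	}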
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 842399cc4cd8..d347afe9a6a9 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -504,14 +504,14 @@ static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
 {
 	u64 rss_hf = rss_conf->rss_hf;
 
-	rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 }
 
 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
@@ -588,8 +588,8 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 {
 	int err, i;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 		nic_dev->num_rss = 0;
 		if (nic_dev->num_rq > 1) {
 			/* get rss template id */
@@ -599,7 +599,7 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 				PMD_DRV_LOG(WARNING, "Alloc rss template failed");
 				return err;
 			}
-			nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+			nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
 			for (i = 0; i < nic_dev->num_rq; i++)
 				hinic_add_rq_to_rx_queue_list(nic_dev, i);
 		}
@@ -610,12 +610,12 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 
 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
 {
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (hinic_rss_template_free(nic_dev->hwdev,
 					    nic_dev->rss_tmpl_idx))
 			PMD_DRV_LOG(WARNING, "Free rss template failed");
 
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 	}
 }
 
@@ -641,7 +641,7 @@ int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
 	int ret = 0;
 
 	switch (dev_conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		ret = hinic_config_mq_rx_rss(nic_dev, on);
 		break;
 	default:
@@ -662,7 +662,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	int lro_wqe_num;
 	int buf_size;
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (rss_conf.rss_hf == 0) {
 			rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
 		} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
@@ -678,7 +678,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
 	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
@@ -687,7 +687,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 		goto rx_csum_ofl_err;
 
 	/* config lro */
-	lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+	lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
 			true : false;
 	max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
 	buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
@@ -726,7 +726,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		hinic_rss_deinit(nic_dev);
 		hinic_destroy_num_qps(nic_dev);
 	}
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 8a45f2d9fc50..5c303398b635 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -8,17 +8,17 @@
 #define HINIC_DEFAULT_RX_FREE_THRESH	32
 
 #define HINIC_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |\
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 enum rq_completion_fmt {
 	RQ_COMPLETE_SGE = 1
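
Note: masks like HINIC_RSS_OFFLOAD_ALL above bound what an application may
request; a hedged usage sketch (port_id is illustrative):

	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL, /* keep the driver's default hash key */
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
	};
	int ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);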
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 8753c340e790..3d0159d78778 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -1536,7 +1536,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 		if (dcb_rx_conf->nb_tcs == 0)
 			hw->dcb_info.pfc_en = 1; /* tc0 only */
@@ -1693,7 +1693,7 @@ hns3_update_queue_map_configure(struct hns3_adapter *hns)
 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		return 0;
 
 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
@@ -1713,22 +1713,22 @@ static void
 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
 {
 	switch (mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->requested_fc_mode = HNS3_FC_FULL;
 		break;
 	default:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
-			  "configured to RTE_FC_NONE", mode);
+			  "configured to RTE_ETH_FC_NONE", mode);
 		break;
 	}
 }
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 693048f58704..8e0ccecb57a6 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -60,29 +60,29 @@ enum hns3_evt_cause {
 };
 
 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
-	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
 };
@@ -500,8 +500,8 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	struct hns3_cmd_desc desc;
 	int ret;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
 		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
 		return -EINVAL;
 	}
@@ -514,10 +514,10 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
 	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
@@ -725,11 +725,11 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rte_spinlock_lock(&hw->lock);
 	rxmode = &dev->data->dev_conf.rxmode;
 	tmp_mask = (unsigned int)mask;
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* ignore vlan filter configuration during promiscuous mode */
 		if (!dev->data->promiscuous) {
 			/* Enable or disable VLAN filter */
-			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
 				 true : false;
 
 			ret = hns3_enable_vlan_filter(hns, enable);
@@ -742,9 +742,9 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
 		    true : false;
 
 		ret = hns3_en_hw_strip_rxvtag(hns, enable);
@@ -1118,7 +1118,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
 				       RTE_ETHER_TYPE_VLAN);
 	if (ret) {
 		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
@@ -1161,7 +1161,7 @@ hns3_restore_vlan_conf(struct hns3_adapter *hns)
 	if (!hw->data->promiscuous) {
 		/* restore vlan filter states */
 		offloads = hw->data->dev_conf.rxmode.offloads;
-		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
 		ret = hns3_enable_vlan_filter(hns, enable);
 		if (ret) {
 			hns3_err(hw, "failed to restore vlan rx filter conf, "
@@ -1204,7 +1204,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
 			  txmode->hw_vlan_reject_untagged);
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	ret = hns3_vlan_offload_set(dev, mask);
 	if (ret) {
 		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
@@ -2213,9 +2213,9 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 	int max_tc = 0;
 	int i;
 
-	if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
-	    (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
-	     tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
 		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
 			 rx_mq_mode, tx_mq_mode);
 		return -EOPNOTSUPP;
@@ -2223,7 +2223,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
 			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
 				 dcb_rx_conf->nb_tcs, pf->tc_max);
@@ -2232,7 +2232,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
 		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
-			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
 				 "nb_tcs(%d) != %d or %d in rx direction.",
 				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
 			return -EINVAL;
@@ -2400,11 +2400,11 @@ hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
 	 * configure link_speeds (default 0), which means auto-negotiation.
 	 * In this case, it should return success.
 	 */
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
 	    hw->mac.support_autoneg == 0)
 		return 0;
 
-	if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
 		ret = hns3_check_port_speed(hw, link_speeds);
 		if (ret)
 			return ret;
@@ -2464,15 +2464,15 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		goto cfg_err;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_setup_dcb(dev);
 		if (ret)
 			goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet to queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		hw->rss_dis_flag = false;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -2493,7 +2493,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -2600,15 +2600,15 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 
 	return speed_capa;
 }
@@ -2619,19 +2619,19 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	return speed_capa;
 }
@@ -2650,7 +2650,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)
 			hns3_get_firber_port_speed_capa(mac->supported_speed);
 
 	if (mac->support_autoneg == 0)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -2676,40 +2676,40 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_KEEP_CRC |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
 	if (hns3_dev_get_support(hw, PTP))
-		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
@@ -2793,7 +2793,7 @@ hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
 
 	ret = hns3_update_link_info(eth_dev);
 	if (ret)
-		hw->mac.link_status = ETH_LINK_DOWN;
+		hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	return ret;
 }
@@ -2806,29 +2806,29 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link->link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link->link_speed = ETH_SPEED_NUM_NONE;
+		new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link->link_duplex = mac->link_duplex;
-	new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link->link_autoneg = mac->link_autoneg;
 }
 
@@ -2848,8 +2848,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 	if (eth_dev->data->dev_started == 0) {
 		new_link.link_autoneg = mac->link_autoneg;
 		new_link.link_duplex = mac->link_duplex;
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
-		new_link.link_status = ETH_LINK_DOWN;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		new_link.link_status = RTE_ETH_LINK_DOWN;
 		goto out;
 	}
 
@@ -2861,7 +2861,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 			break;
 		}
 
-		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+		if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
 			break;
 
 		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
@@ -3207,31 +3207,31 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
 {
 	switch (speed_cmd) {
 	case HNS3_CFG_SPEED_10M:
-		*speed = ETH_SPEED_NUM_10M;
+		*speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case HNS3_CFG_SPEED_100M:
-		*speed = ETH_SPEED_NUM_100M;
+		*speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HNS3_CFG_SPEED_1G:
-		*speed = ETH_SPEED_NUM_1G;
+		*speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HNS3_CFG_SPEED_10G:
-		*speed = ETH_SPEED_NUM_10G;
+		*speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HNS3_CFG_SPEED_25G:
-		*speed = ETH_SPEED_NUM_25G;
+		*speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HNS3_CFG_SPEED_40G:
-		*speed = ETH_SPEED_NUM_40G;
+		*speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HNS3_CFG_SPEED_50G:
-		*speed = ETH_SPEED_NUM_50G;
+		*speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HNS3_CFG_SPEED_100G:
-		*speed = ETH_SPEED_NUM_100G;
+		*speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HNS3_CFG_SPEED_200G:
-		*speed = ETH_SPEED_NUM_200G;
+		*speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	default:
 		return -EINVAL;
@@ -3559,39 +3559,39 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
 	hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
 
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
 		break;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
 		break;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
 		break;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
 		break;
@@ -4254,14 +4254,14 @@ hns3_mac_init(struct hns3_hw *hw)
 	int ret;
 
 	pf->support_sfp_query = true;
-	mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+	mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
 		return ret;
 	}
 
-	mac->link_status = ETH_LINK_DOWN;
+	mac->link_status = RTE_ETH_LINK_DOWN;
 
 	return hns3_config_mtu(hw, pf->mps);
 }
@@ -4511,7 +4511,7 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	 * all packets coming in the receiving direction.
 	 */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, false);
 		if (ret) {
 			hns3_err(hw, "failed to enable promiscuous mode due to "
@@ -4552,7 +4552,7 @@ hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	}
 	/* when promiscuous mode was disabled, restore the vlan filter status */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, true);
 		if (ret) {
 			hns3_err(hw, "failed to disable promiscuous mode due to"
@@ -4672,8 +4672,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 		mac_info->supported_speed =
 					rte_le_to_cpu_32(resp->supported_speed);
 		mac_info->support_autoneg = resp->autoneg_ability;
-		mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
-					: ETH_LINK_AUTONEG;
+		mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+					: RTE_ETH_LINK_AUTONEG;
 	} else {
 		mac_info->query_type = HNS3_DEFAULT_QUERY;
 	}
@@ -4684,8 +4684,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 static uint8_t
 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
 {
-	if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
-		duplex = ETH_LINK_FULL_DUPLEX;
+	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return duplex;
 }
@@ -4735,7 +4735,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 		return ret;
 
 	/* Do nothing if no SFP */
-	if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
 		return 0;
 
 	/*
@@ -4762,7 +4762,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 
 	/* Config full duplex for SFP */
 	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
-				      ETH_LINK_FULL_DUPLEX);
+				      RTE_ETH_LINK_FULL_DUPLEX);
 }
 
 static void
@@ -4881,10 +4881,10 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
 	hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
 
 	/*
-	 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+	 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
 	 * when receiving frames. Otherwise, CRC will be stripped.
 	 */
-	if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
 	else
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
@@ -4912,7 +4912,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret) {
 		hns3_err(hw, "get link status cmd failed %d", ret);
-		return ETH_LINK_DOWN;
+		return RTE_ETH_LINK_DOWN;
 	}
 
 	req = (struct hns3_link_status_cmd *)desc.data;
@@ -5094,19 +5094,19 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		return HNS3_FIBER_LINK_SPEED_1G_BIT;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		return HNS3_FIBER_LINK_SPEED_10G_BIT;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		return HNS3_FIBER_LINK_SPEED_25G_BIT;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		return HNS3_FIBER_LINK_SPEED_40G_BIT;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		return HNS3_FIBER_LINK_SPEED_50G_BIT;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		return HNS3_FIBER_LINK_SPEED_100G_BIT;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		return HNS3_FIBER_LINK_SPEED_200G_BIT;
 	default:
 		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
@@ -5344,20 +5344,20 @@ hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_10M:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_10M:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
 		break;
-	case ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
 		break;
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
 		break;
 	default:
@@ -5373,26 +5373,26 @@ hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_1G:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
 		break;
 	default:
@@ -5427,28 +5427,28 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
 static inline uint32_t
 hns3_get_link_speed(uint32_t link_speeds)
 {
-	uint32_t speed = ETH_SPEED_NUM_NONE;
-
-	if (link_speeds & ETH_LINK_SPEED_10M ||
-	    link_speeds & ETH_LINK_SPEED_10M_HD)
-		speed = ETH_SPEED_NUM_10M;
-	if (link_speeds & ETH_LINK_SPEED_100M ||
-	    link_speeds & ETH_LINK_SPEED_100M_HD)
-		speed = ETH_SPEED_NUM_100M;
-	if (link_speeds & ETH_LINK_SPEED_1G)
-		speed = ETH_SPEED_NUM_1G;
-	if (link_speeds & ETH_LINK_SPEED_10G)
-		speed = ETH_SPEED_NUM_10G;
-	if (link_speeds & ETH_LINK_SPEED_25G)
-		speed = ETH_SPEED_NUM_25G;
-	if (link_speeds & ETH_LINK_SPEED_40G)
-		speed = ETH_SPEED_NUM_40G;
-	if (link_speeds & ETH_LINK_SPEED_50G)
-		speed = ETH_SPEED_NUM_50G;
-	if (link_speeds & ETH_LINK_SPEED_100G)
-		speed = ETH_SPEED_NUM_100G;
-	if (link_speeds & ETH_LINK_SPEED_200G)
-		speed = ETH_SPEED_NUM_200G;
+	uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+	if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+		speed = RTE_ETH_SPEED_NUM_10M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+		speed = RTE_ETH_SPEED_NUM_100M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+		speed = RTE_ETH_SPEED_NUM_1G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+		speed = RTE_ETH_SPEED_NUM_10G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+		speed = RTE_ETH_SPEED_NUM_25G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+		speed = RTE_ETH_SPEED_NUM_40G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+		speed = RTE_ETH_SPEED_NUM_50G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+		speed = RTE_ETH_SPEED_NUM_100G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+		speed = RTE_ETH_SPEED_NUM_200G;
 
 	return speed;
 }
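
As a sanity check on the rename above: hns3_get_link_speed() resolves a
link_speeds bitmap to the single highest speed it contains, since later
checks overwrite earlier ones. A minimal illustrative sketch with the new
macros (not part of the patch):

	uint32_t link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G;
	/* The highest set flag wins: speed == RTE_ETH_SPEED_NUM_25G. */
	uint32_t speed = hns3_get_link_speed(link_speeds);
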
@@ -5456,11 +5456,11 @@ hns3_get_link_speed(uint32_t link_speeds)
 static uint8_t
 hns3_get_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-	    (link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+	    (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 static int
@@ -5594,9 +5594,9 @@ hns3_apply_link_speed(struct hns3_hw *hw)
 	struct hns3_set_link_speed_cfg cfg;
 
 	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
-	cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
-			ETH_LINK_AUTONEG : ETH_LINK_FIXED;
-	if (cfg.autoneg != ETH_LINK_AUTONEG) {
+	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+			RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
 		cfg.speed = hns3_get_link_speed(conf->link_speeds);
 		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
 	}
@@ -5869,7 +5869,7 @@ hns3_do_stop(struct hns3_adapter *hns)
 	ret = hns3_cfg_mac_mode(hw, false);
 	if (ret)
 		return ret;
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
@@ -6080,17 +6080,17 @@ hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	current_mode = hns3_get_current_fc_mode(dev);
 	switch (current_mode) {
 	case HNS3_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case HNS3_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HNS3_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case HNS3_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	}
 
@@ -6236,7 +6236,7 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	int i;
 
 	rte_spinlock_lock(&hw->lock);
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = pf->local_max_tc;
 	else
 		dcb_info->nb_tcs = 1;
@@ -6536,7 +6536,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 		hns3_update_linkstatus_and_event(hw, false);
@@ -6826,7 +6826,7 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
 	 * in device of link speed
 	 * below 10 Gbps.
 	 */
-	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
 		*state = 0;
 		return 0;
 	}
@@ -6858,7 +6858,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
 	 * configured FEC mode is returned.
 	 * If link is up, current FEC mode is returned.
 	 */
-	if (hw->mac.link_status == ETH_LINK_DOWN) {
+	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
 		ret = get_current_fec_auto_state(hw, &auto_state);
 		if (ret)
 			return ret;
@@ -6957,12 +6957,12 @@ get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
 	uint32_t cur_capa;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		cur_capa = fec_capa[1].capa;
 		break;
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		cur_capa = fec_capa[0].capa;
 		break;
 	default:
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e28056b1bd60..0f55fd4c83ad 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -190,10 +190,10 @@ struct hns3_mac {
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 	uint8_t media_type;
 	uint8_t phy_addr;
-	uint8_t link_duplex  : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
-	uint8_t link_status  : 1; /* ETH_LINK_[DOWN/UP] */
-	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
+	uint8_t link_duplex  : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint8_t link_status  : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;      /* RTE_ETH_SPEED_NUM_ */
 	/*
 	 * Some firmware versions support only the SFP speed query. In addition
 	 * to the SFP speed query, some firmware supports the query of the speed
@@ -1076,9 +1076,9 @@ static inline uint64_t
 hns3_txvlan_cap_get(struct hns3_hw *hw)
 {
 	if (hw->port_base_vlan_cfg.state)
-		return DEV_TX_OFFLOAD_VLAN_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	else
-		return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 }
 
 #endif /* _HNS3_ETHDEV_H_ */
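
Since hns3_txvlan_cap_get() advertises RTE_ETH_TX_OFFLOAD_QINQ_INSERT only
conditionally, applications should keep probing the capability before
requesting it. A minimal application-side sketch with the renamed flags
(assumes port_id is a valid, initialized port; not part of the patch):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };

	/* Request QinQ insertion only if the port reports support for it. */
	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
	    (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
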
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 54dbd4b798f2..7b784048b518 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -807,15 +807,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	hw->adapter_state = HNS3_NIC_CONFIGURING;
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
 		ret = -EINVAL;
 		goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect packets to queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -832,7 +832,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -935,32 +935,32 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1640,10 +1640,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	tmp_mask = (unsigned int)mask;
 
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN filter */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = hns3vf_en_vlan_filter(hw, true);
 		else
 			ret = hns3vf_en_vlan_filter(hw, false);
@@ -1653,10 +1653,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Vlan stripping setting */
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
 		else
 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1724,7 +1724,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
 	int ret;
 
 	dev_conf = &hw->data->dev_conf;
-	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
 								   : false;
 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
 	if (ret)
@@ -1749,8 +1749,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
 	}
 
 	/* Apply vlan offload setting */
-	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK);
+	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
 
@@ -2059,7 +2059,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	/*
 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2218,31 +2218,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link.link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link.link_duplex = mac->link_duplex;
-	new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
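
The values published via rte_eth_linkstatus_set() above reach applications
through the public link query; a short consumer sketch with the renamed
macros (port_id assumed valid; illustrative only):

	struct rte_eth_link link;

	/* Non-blocking query; link_speed is reported in Mbps. */
	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP &&
	    link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN)
		printf("port %u is up at %u Mbps\n", port_id, link.link_speed);
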
@@ -2570,11 +2570,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 		 * Make sure to update the link status before hns3vf_stop_poll_job,
 		 * because updating the link status depends on the polling job.
 		 */
-		hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
 					  hw->mac.link_duplex);
 		hns3vf_stop_poll_job(eth_dev);
 	}
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
 	rte_wmb();
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 38a2ee58a651..da6918fddda3 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1298,10 +1298,10 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
 	 * Kunpeng930 and future Kunpeng series support using the src/dst port
 	 * fields in the RSS hash for the IPv6 SCTP packet type.
 	 */
-	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	    (rss->types & ETH_RSS_IP ||
+	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+	    (rss->types & RTE_ETH_RSS_IP ||
 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
-	    rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 		return false;
 
 	return true;
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
index 5dfe68cc4dbd..9a829d7011ad 100644
--- a/drivers/net/hns3/hns3_ptp.c
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -21,7 +21,7 @@ hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		return 0;
 
 	ret = rte_mbuf_dyn_rx_timestamp_register
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index 3a81e90e0911..85495bbe89d9 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -76,69 +76,69 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_tuple_table[] = {
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
 };
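
The table above pairs each RTE_ETH_RSS_* type with the L3/L4 src/dst-only
modifiers. On the application side the same combinations are expressed
through the rte_flow RSS action; an illustrative fragment (remaining
rte_flow_action_rss fields keep their zero defaults):

	/* Hash IPv4/TCP flows on the source L4 port only. */
	struct rte_flow_action_rss rss_action = {
		.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
	};
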
 
@@ -146,44 +146,44 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_rss_types[] = {
-	{ ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+	{ RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+	{ RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
 };
@@ -365,10 +365,10 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
 	 * When the user does not specify one of the following types or a
 	 * combination of them, all fields are enabled for the supported RSS
 	 * types. The types are:
-	 * - ETH_RSS_L3_SRC_ONLY
-	 * - ETH_RSS_L3_DST_ONLY
-	 * - ETH_RSS_L4_SRC_ONLY
-	 * - ETH_RSS_L4_DST_ONLY
+	 * - RTE_ETH_RSS_L3_SRC_ONLY
+	 * - RTE_ETH_RSS_L3_DST_ONLY
+	 * - RTE_ETH_RSS_L4_SRC_ONLY
+	 * - RTE_ETH_RSS_L4_DST_ONLY
 	 */
 	if (fields_count == 0) {
 		for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
@@ -520,8 +520,8 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
 	memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl,
 	       sizeof(rss_cfg->rss_indirection_tbl));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
 			rte_spinlock_unlock(&hw->lock);
 			hns3_err(hw, "queue id(%u) set to redirection table "
@@ -572,8 +572,8 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	rte_spinlock_lock(&hw->lock);
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] =
 						rss_cfg->rss_indirection_tbl[i];
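
For completeness, the application-facing counterpart of these loops uses the
same renamed RTE_ETH_RETA_GROUP_SIZE constant. A minimal round-robin sketch
(reta_size is assumed to come from dev_info.reta_size and nb_rx_queues is the
configured Rx queue count; not part of the patch):

	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
						  RTE_ETH_RETA_GROUP_SIZE] = {0};
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
	rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
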
@@ -692,7 +692,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	}
 
 	/* When RSS is off, redirect packets to queue 0 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
 		hns3_rss_uninit(hns);
 
 	/* Configure RSS hash algorithm and hash key offset */
@@ -709,7 +709,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	 * When RSS is off, there is no need to configure the RSS redirection
 	 * table in hardware.
 	 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
 					       hw->rss_ind_tbl_size);
 		if (ret)
@@ -723,7 +723,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	return ret;
 
 rss_indir_table_uninit:
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret1 = hns3_rss_reset_indir_table(hw);
 		if (ret1 != 0)
 			return ret;
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 996083b88b25..6f153a1b7bfb 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -8,20 +8,20 @@
 #include <rte_flow.h>
 
 #define HNS3_ETH_RSS_SUPPORT ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L3_SRC_ONLY | \
-	ETH_RSS_L3_DST_ONLY | \
-	ETH_RSS_L4_SRC_ONLY | \
-	ETH_RSS_L4_DST_ONLY)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L3_SRC_ONLY | \
+	RTE_ETH_RSS_L3_DST_ONLY | \
+	RTE_ETH_RSS_L4_SRC_ONLY | \
+	RTE_ETH_RSS_L4_DST_ONLY)
 
 #define HNS3_RSS_IND_TBL_SIZE	512 /* The size of hash lookup table */
 #define HNS3_RSS_IND_TBL_SIZE_MAX 2048
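
Applications picking from the support mask above should still intersect
their request with what the port actually reports; a minimal sketch with the
renamed types (port_id assumed valid; not part of the patch):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_conf rss_conf = { 0 };	/* NULL key keeps the current key */

	rte_eth_dev_info_get(port_id, &dev_info);
	rss_conf.rss_hf = (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			   RTE_ETH_RSS_NONFRAG_IPV6_TCP) &
			  dev_info.flow_type_rss_offloads;
	rte_eth_dev_rss_hash_update(port_id, &rss_conf);
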
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 602548a4f25b..920ee8ceeab9 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1924,7 +1924,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* CRC len set here is used for amending packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1969,7 +1969,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
 						 rxq->rx_buf_len);
 	}
 
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 	    dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
 		dev->data->scattered_rx = true;
 }
@@ -2845,7 +2845,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = !dev->data->scattered_rx &&
-			 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+			 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
 
 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_recv_pkts_vec;
@@ -3139,7 +3139,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
 	int ret;
 
 	offloads = hw->data->dev_conf.rxmode.offloads;
-	gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
@@ -4291,7 +4291,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return false;
 
-	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
 }
 
 static bool
@@ -4303,16 +4303,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 	return true;
 #else
 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
-		DEV_TX_OFFLOAD_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_CKSUM | \
-		DEV_TX_OFFLOAD_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_SCTP_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index c8229e9076b5..dfea5d5b4c2f 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -307,7 +307,7 @@ struct hns3_rx_queue {
 	uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
 	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */
 
-	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+	/* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
 	/*
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index ff434d2d33ed..455110361aac 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -22,8 +22,8 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
-	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
-	if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	/* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		return -ENOTSUP;
 
 	return 0;
@@ -228,10 +228,10 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
 int
 hns3_rx_check_vec_support(struct rte_eth_dev *dev)
 {
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
-				 DEV_RX_OFFLOAD_VLAN;
+	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				 RTE_ETH_RX_OFFLOAD_VLAN;
 
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	if (hns3_dev_get_support(hw, PTP))
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0a4db0891d4a..293df887bf7c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1629,7 +1629,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 
 	/* Set the global registers with default ether type value */
 	if (!pf->support_multi_driver) {
-		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					 RTE_ETHER_TYPE_VLAN);
 		if (ret != I40E_SUCCESS) {
 			PMD_INIT_LOG(ERR,
@@ -1896,8 +1896,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Only legacy filter API needs the following fdir config. So when the
 	 * legacy filter API is deprecated, the following codes should also be
@@ -1931,13 +1931,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	 *  number, which will be available after rx_queue_setup(). dev_start()
 	 *  function is good to place RSS setup.
 	 */
-	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
 		ret = i40e_vmdq_setup(dev);
 		if (ret)
 			goto err;
 	}
 
-	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = i40e_dcb_setup(dev);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
@@ -2214,17 +2214,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 {
 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
 
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed |= I40E_LINK_SPEED_40GB;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed |= I40E_LINK_SPEED_25GB;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed |= I40E_LINK_SPEED_20GB;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed |= I40E_LINK_SPEED_10GB;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed |= I40E_LINK_SPEED_1GB;
-	if (link_speeds & ETH_LINK_SPEED_100M)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 		link_speed |= I40E_LINK_SPEED_100MB;
 
 	return link_speed;
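
On the configuration side, the same renamed flags select between
autonegotiation and a fixed rate; a minimal sketch (illustrative only):

	struct rte_eth_conf conf = { 0 };

	/* Force a fixed 10G link instead of autonegotiation. */
	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
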
@@ -2332,13 +2332,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
 		     I40E_AQ_PHY_LINK_ENABLED;
 
-	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
-		conf->link_speeds = ETH_LINK_SPEED_40G |
-				    ETH_LINK_SPEED_25G |
-				    ETH_LINK_SPEED_20G |
-				    ETH_LINK_SPEED_10G |
-				    ETH_LINK_SPEED_1G |
-				    ETH_LINK_SPEED_100M;
+	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+				    RTE_ETH_LINK_SPEED_25G |
+				    RTE_ETH_LINK_SPEED_20G |
+				    RTE_ETH_LINK_SPEED_10G |
+				    RTE_ETH_LINK_SPEED_1G |
+				    RTE_ETH_LINK_SPEED_100M;
 
 		abilities |= I40E_AQ_PHY_AN_ENABLED;
 	} else {
@@ -2876,34 +2876,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	/* Parse the link status */
 	switch (link_speed) {
 	case I40E_REG_SPEED_0:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_REG_SPEED_1:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_REG_SPEED_2:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_REG_SPEED_3:
 		if (hw->mac.type == I40E_MAC_X722) {
-			link->link_speed = ETH_SPEED_NUM_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		} else {
 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
 
 			if (reg_val & I40E_REG_MACC_25GB)
-				link->link_speed = ETH_SPEED_NUM_25G;
+				link->link_speed = RTE_ETH_SPEED_NUM_25G;
 			else
-				link->link_speed = ETH_SPEED_NUM_40G;
+				link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		}
 		break;
 	case I40E_REG_SPEED_4:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
-			link->link_speed = ETH_SPEED_NUM_20G;
+			link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2930,8 +2930,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 		status = i40e_aq_get_link_info(hw, enable_lse,
 						&link_status, NULL);
 		if (unlikely(status != I40E_SUCCESS)) {
-			link->link_speed = ETH_SPEED_NUM_NONE;
-			link->link_duplex = ETH_LINK_FULL_DUPLEX;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			return;
 		}
@@ -2946,28 +2946,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		link->link_speed = ETH_SPEED_NUM_20G;
+		link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (link->link_status)
-			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			link->link_speed = ETH_SPEED_NUM_NONE;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 }
@@ -2984,9 +2984,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	memset(&link, 0, sizeof(link));
 
 	/* i40e uses full duplex only */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	if (!wait_to_complete && !enable_lse)
 		update_link_reg(hw, &link);
@@ -3720,33 +3720,33 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -3805,7 +3805,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
 		/* For XL710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_40G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
 		dev_info->default_rxportconf.nb_queues = 2;
 		dev_info->default_txportconf.nb_queues = 2;
 		if (dev->data->nb_rx_queues == 1)
@@ -3819,17 +3819,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
 		/* For XXV710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_25G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
 		dev_info->default_rxportconf.ring_size = 256;
 		dev_info->default_txportconf.ring_size = 256;
 	} else {
 		/* For X710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
-		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
 			dev_info->default_rxportconf.ring_size = 512;
 			dev_info->default_txportconf.ring_size = 256;
 		} else {
@@ -3868,7 +3868,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
 	int ret;
 
 	if (qinq) {
-		if (vlan_type == ETH_VLAN_TYPE_OUTER)
+		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 			reg_id = 2;
 	}
 
@@ -3915,12 +3915,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
-	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -3934,12 +3934,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	/* 802.1ad frames ability is added in NVM API 1.7*/
 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
 		if (qinq) {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->first_tag = rte_cpu_to_le_16(tpid);
-			else if (vlan_type == ETH_VLAN_TYPE_INNER)
+			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		} else {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		}
 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
@@ -3998,37 +3998,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					   RTE_ETHER_TYPE_VLAN);
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					   RTE_ETHER_TYPE_VLAN);
 		}
 		else
 			i40e_vsi_config_double_vlan(vsi, FALSE);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
 		/* Enable or disable outer VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
@@ -4111,17 +4111,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	/* Return current mode according to the actual setting */
 	switch (hw->fc.current_mode) {
 	case I40E_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case I40E_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case I40E_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case I40E_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	};
 
 	return 0;
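
The renamed RTE_ETH_FC_* values travel back to applications unchanged
through the flow-control query; a short consumer sketch (port_id assumed
valid; not part of the patch):

	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0 &&
	    fc_conf.mode == RTE_ETH_FC_FULL)
		printf("port %u pauses in both directions\n", port_id);
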
@@ -4137,10 +4137,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	struct i40e_hw *hw;
 	struct i40e_pf *pf;
 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
-		[RTE_FC_NONE] = I40E_FC_NONE,
-		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
-		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
-		[RTE_FC_FULL] = I40E_FC_FULL
+		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
+		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+		[RTE_ETH_FC_FULL] = I40E_FC_FULL
 	};
 
 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
@@ -4287,7 +4287,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4440,7 +4440,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4456,8 +4456,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4483,7 +4483,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4500,8 +4500,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -4818,7 +4818,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
 				hw->func_caps.num_vsis - vsi_count);
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-				ETH_64_POOLS);
+				RTE_ETH_64_POOLS);
 			if (pf->max_nb_vmdq_vsi) {
 				pf->flags |= I40E_FLAG_VMDQ;
 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -6104,10 +6104,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	int mask = 0;
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK |
-	       ETH_QINQ_STRIP_MASK |
-	       ETH_VLAN_FILTER_MASK |
-	       ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+	       RTE_ETH_QINQ_STRIP_MASK |
+	       RTE_ETH_VLAN_FILTER_MASK |
+	       RTE_ETH_VLAN_EXTEND_MASK;
 	ret = i40e_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6236,9 +6236,9 @@ i40e_pf_setup(struct i40e_pf *pf)
 
 	/* Configure filter control */
 	memset(&settings, 0, sizeof(settings));
-	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
-	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 	else {
 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -7098,7 +7098,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
 {
 	uint32_t vid_idx, vid_bit;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return 0;
 
 	vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7133,7 +7133,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
 	int ret;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return;
 
 	i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -7727,25 +7727,25 @@ static int
 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
 {
 	switch (filter_type) {
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_OIP:
+	case RTE_ETH_TUNNEL_FILTER_OIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
 		break;
-	case ETH_TUNNEL_FILTER_IIP:
+	case RTE_ETH_TUNNEL_FILTER_IIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 		break;
 	default:
@@ -8711,16 +8711,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8746,12 +8746,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8843,7 +8843,7 @@ int
 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->adapter->hw;
-	uint8_t lut[ETH_RSS_RETA_SIZE_512];
+	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 	uint32_t i;
 	int num;
 
@@ -8851,7 +8851,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 	 * configured. It's necessary to calculate the actual PF
 	 * queues that are configured.
 	 */
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		num = i40e_pf_calc_configured_queues_num(pf);
 	else
 		num = pf->dev_data->nb_rx_queues;
@@ -8930,7 +8930,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
-	    !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return 0;
 
 	hw = I40E_PF_TO_HW(pf);
@@ -10267,16 +10267,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_25G:
 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
 		break;
@@ -10504,7 +10504,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
 	else
 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
 
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_cfg->pfc.willing = 0;
 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 		dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11012,7 +11012,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint16_t bsf, tc_mapping;
 	int i, j = 0;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
 	else
 		dcb_info->nb_tcs = 1;
@@ -11060,7 +11060,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
 		}
 		j++;
-	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
 	return 0;
 }
 
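Note on the RETA hunks above: they all share the same indexing scheme, where
entry i sits in 64-entry group i / RTE_ETH_RETA_GROUP_SIZE at slot
i % RTE_ETH_RETA_GROUP_SIZE. For illustration only (not part of this diff;
port id and queue count are placeholders), an application filling the table
with the renamed macros could look like:

#include <errno.h>
#include <rte_ethdev.h>

/* Spread reta_size entries round-robin across nb_queues queues. */
static int
example_reta_update(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
						  RTE_ETH_RETA_GROUP_SIZE] = {0};
	uint16_t i;

	if (nb_queues == 0 || reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -EINVAL;

	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
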
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1d57b9617e66..d8042abbd9be 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -147,17 +147,17 @@ enum i40e_flxpld_layer_idx {
 		       I40E_FLAG_RSS_AQ_CAPABLE)
 
 #define I40E_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /* All bits of RSS hash enable for X722*/
 #define I40E_RSS_HENA_ALL_X722 ( \
@@ -1063,7 +1063,7 @@ struct i40e_rte_flow_rss_conf {
 	uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
 		     I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		    sizeof(uint32_t)];		/**< Hash key. */
-	uint16_t queue[ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
+	uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
 
 	bool symmetric_enable;		/**< true, if enable symmetric */
 	uint64_t config_pctypes;	/**< All PCTYPES with the flow  */
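
I40E_RSS_OFFLOAD_ALL above is what the PF reports as flow_type_rss_offloads;
an application requests a subset of it via the renamed RTE_ETH_RSS_* bits.
A minimal sketch (values are placeholders, not part of this diff):

#include <rte_ethdev.h>

/* Request RSS over IPv4 TCP/UDP, keeping the driver's default hash key. */
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		},
	},
};
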
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e41a84f1d737..9acaa1875105 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2015,7 +2015,7 @@ i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_VLAN_EXTEND;
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
@@ -3601,13 +3601,13 @@ i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
 }
 
 static uint16_t i40e_supported_tunnel_filter_types[] = {
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
-	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IMAC,
-	ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
+	RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC,
 };
 
 static int
@@ -3697,12 +3697,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 			break;
@@ -3724,7 +3724,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -3798,7 +3798,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					   vxlan_spec->vni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			vxlan_flag = 1;
@@ -3927,12 +3927,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 
@@ -3955,7 +3955,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -4050,7 +4050,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					   nvgre_spec->tni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			nvgre_flag = 1;
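
The tunnel filter flags above are combined per cloud-filter type; the UDP
tunnel port API consumes the matching renamed type enum. A sketch using the
IANA VXLAN port (placeholder port id, not part of this diff):

#include <rte_ethdev.h>

/* Tell the NIC to parse UDP port 4789 as VXLAN. */
static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
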
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
index 6579b1a00b16..1229f2f7a1c7 100644
--- a/drivers/net/i40e/i40e_hash.c
+++ b/drivers/net/i40e/i40e_hash.c
@@ -102,47 +102,47 @@ struct i40e_hash_map_rss_inset {
 
 const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
 	/* IPv4 */
-	{ ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
-	{ ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* IPv6 */
-	{ ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
-	{ ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* Port */
-	{ ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+	{ RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
 	/* Ether */
-	{ ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
-	{ ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+	{ RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+	{ RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
 
 	/* VLAN */
-	{ ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
-	{ ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+	{ RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+	{ RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
 };
 
 #define I40E_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
@@ -201,30 +201,30 @@ struct i40e_hash_match_pattern {
 #define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
 	pattern, rss_mask, true, cus_pctype }
 
-#define I40E_HASH_L2_RSS_MASK		(ETH_RSS_VLAN | ETH_RSS_ETH | \
-					ETH_RSS_L2_SRC_ONLY | \
-					ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK		(RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+					RTE_ETH_RSS_L2_SRC_ONLY | \
+					RTE_ETH_RSS_L2_DST_ONLY)
 
 #define I40E_HASH_L23_RSS_MASK		(I40E_HASH_L2_RSS_MASK | \
-					ETH_RSS_L3_SRC_ONLY | \
-					ETH_RSS_L3_DST_ONLY)
+					RTE_ETH_RSS_L3_SRC_ONLY | \
+					RTE_ETH_RSS_L3_DST_ONLY)
 
-#define I40E_HASH_IPV4_L23_RSS_MASK	(ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK	(ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK	(RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK	(RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
 
 #define I40E_HASH_L234_RSS_MASK		(I40E_HASH_L23_RSS_MASK | \
-					ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
-					ETH_RSS_L4_DST_ONLY)
+					RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+					RTE_ETH_RSS_L4_DST_ONLY)
 
-#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
 
-#define I40E_HASH_L4_TYPES		(ETH_RSS_NONFRAG_IPV4_TCP | \
-					ETH_RSS_NONFRAG_IPV4_UDP | \
-					ETH_RSS_NONFRAG_IPV4_SCTP | \
-					ETH_RSS_NONFRAG_IPV6_TCP | \
-					ETH_RSS_NONFRAG_IPV6_UDP | \
-					ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES		(RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* Current supported patterns and RSS types.
  * All items that have the same pattern types are together.
@@ -232,68 +232,68 @@ struct i40e_hash_match_pattern {
 static const struct i40e_hash_match_pattern match_patterns[] = {
 	/* Ether */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
-			      ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+			      RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
 			      I40E_FILTER_PCTYPE_L2_PAYLOAD),
 
 	/* IPv4 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV4),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_NONFRAG_IPV4_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
 			      I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
-			      ETH_RSS_NONFRAG_IPV4_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
-			      ETH_RSS_NONFRAG_IPV4_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_UDP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
-			      ETH_RSS_NONFRAG_IPV4_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
 
 	/* IPv6 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_NONFRAG_IPV6_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
 			      I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
-			      ETH_RSS_NONFRAG_IPV6_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_TCP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
-			      ETH_RSS_NONFRAG_IPV6_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_UDP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
-			      ETH_RSS_NONFRAG_IPV6_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
 
 	/* ESP */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
 
 	/* GTPC */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
@@ -308,27 +308,27 @@ static const struct i40e_hash_match_pattern match_patterns[] = {
 				  I40E_HASH_IPV4_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
 				  I40E_HASH_IPV6_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 
 	/* L2TPV3 */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
 
 	/* AH */
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV4),
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV6),
 };
 
@@ -564,29 +564,29 @@ i40e_hash_get_inset(uint64_t rss_types)
 	/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
 	 * it is the same case as none of them are added.
 	 */
-	mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
-	if (mask == ETH_RSS_L2_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
 		inset &= ~I40E_INSET_DMAC;
-	else if (mask == ETH_RSS_L2_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
 		inset &= ~I40E_INSET_SMAC;
 
-	mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
-	if (mask == ETH_RSS_L3_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
 		inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
-	else if (mask == ETH_RSS_L3_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
 		inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
 
-	mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-	if (mask == ETH_RSS_L4_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
 		inset &= ~I40E_INSET_DST_PORT;
-	else if (mask == ETH_RSS_L4_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
 		inset &= ~I40E_INSET_SRC_PORT;
 
 	if (rss_types & I40E_HASH_L4_TYPES) {
 		uint64_t l3_mask = rss_types &
-				   (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+				   (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 		uint64_t l4_mask = rss_types &
-				   (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+				   (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 		if (l3_mask && !l4_mask)
 			inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
@@ -825,7 +825,7 @@ i40e_hash_config(struct i40e_pf *pf,
 
 	/* Update lookup table */
 	if (rss_info->queue_num > 0) {
-		uint8_t lut[ETH_RSS_RETA_SIZE_512];
+		uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 		uint32_t i, j = 0;
 
 		for (i = 0; i < hw->func_caps.rss_table_size; i++) {
@@ -932,7 +932,7 @@ i40e_hash_parse_queues(const struct rte_eth_dev *dev,
 			    "RSS key is ignored when queues specified");
 
 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		max_queue = i40e_pf_calc_configured_queues_num(pf);
 	else
 		max_queue = pf->dev_data->nb_rx_queues;
@@ -1070,22 +1070,22 @@ i40e_hash_validate_rss_types(uint64_t rss_types)
 	uint64_t type, mask;
 
 	/* Validate L2 */
-	type = ETH_RSS_ETH & rss_types;
-	mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+	type = RTE_ETH_RSS_ETH & rss_types;
+	mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L3 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-	       ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
-	       ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
-	mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+	       RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+	       RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+	mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L4 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
-	mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+	mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
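i40e_hash_get_inset() above implements the rule from its comment: setting
both the SRC_ONLY and DST_ONLY bits of one layer cancels out to hashing on
both fields. A flow rule exercising the single-sided path could carry, for
instance (illustrative values, not part of this diff):

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hash IPv4/TCP flows on the source address only; the destination is
 * removed from the input set as computed in i40e_hash_get_inset(). */
static const struct rte_flow_action_rss rss = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
		 RTE_ETH_RSS_L3_SRC_ONLY,
};
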
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index e2d8b2b5f7f1..ccb3924a5f68 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1207,24 +1207,24 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 	event.event_data.link_event.link_status =
 		dev->data->dev_link.link_status;
 
-	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+	/* need to convert the RTE_ETH_SPEED_NUM_xxx into VIRTCHNL_LINK_SPEED_xxx */
 	switch (dev->data->dev_link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	default:
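
The switch above translates the renamed RTE_ETH_SPEED_NUM_* constants into
virtchnl speeds for the VF; the same constants come back from the link query
on the application side. A sketch (placeholder port id, not part of this
diff):

#include <rte_ethdev.h>

/* Return non-zero when the port is up at 40G. */
static int
example_link_is_40g(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return 0;

	return link.link_status == RTE_ETH_LINK_UP &&
	       link.link_speed == RTE_ETH_SPEED_NUM_40G;
}
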
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 554b1142c136..a13bb81115f4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1329,7 +1329,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	for (i = 0; i < tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		if (k) {
 			for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
 				for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
@@ -1995,7 +1995,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2243,7 +2243,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 	}
 	/* check simple tx conflict */
 	if (ad->tx_simple_allowed) {
-		if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+		if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
 				txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
 			PMD_DRV_LOG(ERR, "No-simple tx is required.");
 			return -EINVAL;
@@ -3417,7 +3417,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		 txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
 	ad->tx_vec_allowed = (ad->tx_simple_allowed &&
 			txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
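
As the hunks above show, i40e keeps the simple (and vector-capable) Tx path
only when the queue's offloads are a subset of
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE. An application that wants that path can
gate its request on the advertised capability, e.g. (placeholder port id,
not part of this diff):

#include <rte_ethdev.h>

/* Request fast free only if the port advertises it. */
static uint64_t
example_tx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	return dev_info.tx_offload_capa &
	       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
}
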
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2301e6301d7d..5e6eecc50116 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -120,7 +120,7 @@ struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
-	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -166,7 +166,7 @@ struct i40e_tx_queue {
 	bool q_set; /**< indicate if tx queue has been configured */
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
-	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index bd21d6422394..5f00d43950aa 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -899,7 +899,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index f52ed98d62d0..0192164c35fa 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -100,7 +100,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	  */
 	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < n; i++) {
 			free[i] = txep[i].mbuf;
 			txep[i].mbuf = NULL;
@@ -211,7 +211,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct i40e_rx_queue *rxq;
 	uint16_t desc, i;
 	bool first_queue;
@@ -221,11 +221,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	 /* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	/* no QinQ support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 
 	/**
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 12d5a2e48a9b..663c46b91dc5 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -42,30 +42,30 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
 		sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -385,19 +385,19 @@ i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
 		return -EINVAL;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			return i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_STRIP)
+		    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			return i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_stripping(vsi, FALSE);
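
The representor honours the same two mask bits as the PF path. Toggling
stripping at runtime with the renamed names looks like this on the
application side (placeholder port id, not part of this diff):

#include <rte_ethdev.h>

/* Enable VLAN stripping on top of the current offload set. */
static int
example_enable_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;

	return rte_eth_dev_set_vlan_offload(port_id,
			mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
}
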
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 34bfa9af4734..12f541f53926 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -50,18 +50,18 @@
 	VIRTCHNL_VF_OFFLOAD_RX_POLLING)
 
 #define IAVF_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |         \
-	ETH_RSS_NONFRAG_IPV4_TCP |  \
-	ETH_RSS_NONFRAG_IPV4_UDP |  \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |         \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 611f1f7722b0..df44df772e4e 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -266,53 +266,53 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	static const uint64_t map_hena_rss[] = {
 		/* IPv4 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
-				ETH_RSS_NONFRAG_IPV4_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
-				ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
 
 		/* IPv6 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
-				ETH_RSS_NONFRAG_IPV6_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
-				ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
 
 		/* L2 Payload */
-		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
 	};
 
-	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
-				  ETH_RSS_NONFRAG_IPV4_TCP |
-				  ETH_RSS_NONFRAG_IPV4_SCTP |
-				  ETH_RSS_NONFRAG_IPV4_OTHER |
-				  ETH_RSS_FRAG_IPV4;
+	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV4;
 
-	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_NONFRAG_IPV6_SCTP |
-				  ETH_RSS_NONFRAG_IPV6_OTHER |
-				  ETH_RSS_FRAG_IPV6;
+	const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV6;
 
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
@@ -331,13 +331,13 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	/**
-	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
 	 * generalizations of all other IPv4 and IPv6 RSS types.
 	 */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		rss_hf |= ipv4_rss;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		rss_hf |= ipv6_rss;
 
 	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
@@ -363,10 +363,10 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	if (valid_rss_hf & ipv4_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
 
 	if (valid_rss_hf & ipv6_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
 
 	if (rss_hf & ~valid_rss_hf)
 		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
@@ -467,7 +467,7 @@ iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
 		return 0;
 
 	enable = !!(dev->data->dev_conf.txmode.offloads &
-		    DEV_TX_OFFLOAD_VLAN_INSERT);
+		    RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	iavf_config_vlan_insert_v2(adapter, enable);
 
 	return 0;
@@ -479,10 +479,10 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
 	int err;
 
 	err = iavf_dev_vlan_offload_set(dev,
-					ETH_VLAN_STRIP_MASK |
-					ETH_QINQ_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK |
-					ETH_VLAN_EXTEND_MASK);
+					RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_QINQ_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK |
+					RTE_ETH_VLAN_EXTEND_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
 		return err;
@@ -512,8 +512,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_vec_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Large VF setting */
 	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
@@ -611,7 +611,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -961,34 +961,34 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
@@ -1048,42 +1048,42 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (vf->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -1231,14 +1231,14 @@ iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
 	bool enable;
 	int err;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 
 		iavf_iterate_vlan_filters_v2(dev, enable);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		err = iavf_config_vlan_strip_v2(adapter, enable);
 		/* If not support, the stripping is already disabled by PF */
@@ -1267,9 +1267,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			err = iavf_enable_vlan_strip(adapter);
 		else
 			err = iavf_disable_vlan_strip(adapter);
@@ -1311,8 +1311,8 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(lut, vf->rss_lut, reta_size);
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -1348,8 +1348,8 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = vf->rss_lut[i];
 	}
@@ -1556,7 +1556,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
 		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
-					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
 					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
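
iavf_config_rss_hf() treats RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 as
umbrella bits, per the comment in the hunk above: each one pulls in every
sub-type of its family. A standalone mirror of the IPv4 expansion
(illustrative only, not part of this diff):

#include <rte_ethdev.h>

/* Expand the IPv4 umbrella bit the same way the driver does. */
static uint64_t
example_expand_ipv4(uint64_t rss_hf)
{
	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
				  RTE_ETH_RSS_FRAG_IPV4;

	if (rss_hf & RTE_ETH_RSS_IPV4)
		rss_hf |= ipv4_rss;

	return rss_hf;
}
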
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 1f2d3772d105..248054f79efd 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -341,90 +341,90 @@ struct virtchnl_proto_hdrs ipv4_ecpri_tmplt = {
 /* rss type super set */
 
 /* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define IAVF_RSS_TYPE_OUTER_IPV4	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6	(ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* VLAN IPV4 */
 #define IAVF_RSS_TYPE_VLAN_IPV4		(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define IAVF_RSS_TYPE_VLAN_IPV6		(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4	ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4	RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6	ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6	RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* GTPU IPv4 */
 #define IAVF_RSS_TYPE_GTPU_IPV4		(IAVF_RSS_TYPE_INNER_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_UDP	(IAVF_RSS_TYPE_INNER_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_TCP	(IAVF_RSS_TYPE_INNER_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define IAVF_RSS_TYPE_GTPU_IPV6		(IAVF_RSS_TYPE_INNER_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_UDP	(IAVF_RSS_TYPE_INNER_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_TCP	(IAVF_RSS_TYPE_INNER_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /**
  * Supported pattern for hash.
@@ -442,7 +442,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv4_udp,		IAVF_RSS_TYPE_VLAN_IPV4_UDP,	&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_tcp,		IAVF_RSS_TYPE_VLAN_IPV4_TCP,	&outer_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_sctp,		IAVF_RSS_TYPE_VLAN_IPV4_SCTP,	&outer_ipv4_sctp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpu,			ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gtpu,			RTE_ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&inner_ipv4_tcp_tmplt},
@@ -484,9 +484,9 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv4_ah,			IAVF_RSS_TYPE_IPV4_AH,		&ipv4_ah_tmplt},
 	{iavf_pattern_eth_ipv4_l2tpv3,			IAVF_RSS_TYPE_IPV4_L2TPV3,	&ipv4_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv4_pfcp,			IAVF_RSS_TYPE_IPV4_PFCP,	&ipv4_pfcp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpc,			ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
-	{iavf_pattern_eth_ecpri,			ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
-	{iavf_pattern_eth_ipv4_ecpri,			ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_gtpc,			RTE_ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ecpri,			RTE_ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_ecpri,			RTE_ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -504,7 +504,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv6_udp,		IAVF_RSS_TYPE_VLAN_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_tcp,		IAVF_RSS_TYPE_VLAN_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_sctp,		IAVF_RSS_TYPE_VLAN_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpu,			ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gtpu,			RTE_ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&inner_ipv6_tcp_tmplt},
@@ -546,7 +546,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_ah,			IAVF_RSS_TYPE_IPV6_AH,		&ipv6_ah_tmplt},
 	{iavf_pattern_eth_ipv6_l2tpv3,			IAVF_RSS_TYPE_IPV6_L2TPV3,	&ipv6_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv6_pfcp,			IAVF_RSS_TYPE_IPV6_PFCP,	&ipv6_pfcp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpc,			ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ipv6_gtpc,			RTE_ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -580,52 +580,52 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 	struct virtchnl_rss_cfg rss_cfg;
 
 #define IAVF_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		rss_cfg.proto_hdrs = inner_ipv4_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		rss_cfg.proto_hdrs = inner_ipv6_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
@@ -779,28 +779,28 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 		hdr = &proto_hdrs->proto_hdr[i];
 		switch (hdr->type) {
 		case VIRTCHNL_PROTO_HDR_ETH:
-			if (!(rss_type & ETH_RSS_ETH))
+			if (!(rss_type & RTE_ETH_RSS_ETH))
 				hdr->field_selector = 0;
-			else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_DST);
-			else if (rss_type & ETH_RSS_L2_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_SRC);
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4) {
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 					iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
-				} else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				}
@@ -808,39 +808,39 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4)
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
 					REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6:
 			if (rss_type &
-			    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV6_TCP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			    (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				}
@@ -857,7 +857,7 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
 			else
 				hdr->field_selector = 0;
@@ -865,87 +865,87 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_UDP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, UDP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_TCP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, TCP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_SCTP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_SCTP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, SCTP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_S_VLAN:
-			if (!(rss_type & ETH_RSS_S_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_S_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_C_VLAN:
-			if (!(rss_type & ETH_RSS_C_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_C_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_L2TPV3:
-			if (!(rss_type & ETH_RSS_L2TPV3))
+			if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ESP:
-			if (!(rss_type & ETH_RSS_ESP))
+			if (!(rss_type & RTE_ETH_RSS_ESP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_AH:
-			if (!(rss_type & ETH_RSS_AH))
+			if (!(rss_type & RTE_ETH_RSS_AH))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_PFCP:
-			if (!(rss_type & ETH_RSS_PFCP))
+			if (!(rss_type & RTE_ETH_RSS_PFCP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ECPRI:
-			if (!(rss_type & ETH_RSS_ECPRI))
+			if (!(rss_type & RTE_ETH_RSS_ECPRI))
 				hdr->field_selector = 0;
 			break;
 		default:
@@ -962,7 +962,7 @@ iavf_refine_proto_hdrs_gtpu(struct virtchnl_proto_hdrs *proto_hdrs,
 	struct virtchnl_proto_hdr *hdr;
 	int i;
 
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	for (i = 0; i < proto_hdrs->count; i++) {
@@ -1059,10 +1059,10 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -1073,27 +1073,27 @@ struct rss_attr_type {
 	uint64_t type;
 };
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE64)
 
 #define INVALID_RSS_ATTR	(RTE_ETH_RSS_L3_PRE32	| \
@@ -1103,9 +1103,9 @@ struct rss_attr_type {
 				 RTE_ETH_RSS_L3_PRE96)
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* current ipv6 prefix only supports prefix 64 bits */
 	{RTE_ETH_RSS_L3_PRE64,				VALID_RSS_IPV6},
 	{INVALID_RSS_ATTR,				0}
@@ -1122,15 +1122,15 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
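As a point of reference for the flag rename above, here is a minimal
application-side sketch (not taken from this patch; port_id and the
queue counts are assumptions) showing the RTE_ETH-prefixed RSS flags
and MQ mode in use together:

	#include <rte_ethdev.h>

	/* Sketch: enable RSS over IPv4/IPv6 TCP and UDP with the renamed
	 * flags. Error handling beyond the return code is elided.
	 */
	static int
	configure_rss(uint16_t port_id)
	{
		struct rte_eth_conf conf = {
			.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
			.rx_adv_conf = {
				.rss_conf = {
					.rss_key = NULL, /* driver default key */
					.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
						  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
						  RTE_ETH_RSS_NONFRAG_IPV6_UDP |
						  RTE_ETH_RSS_NONFRAG_IPV6_TCP,
				},
			},
		};

		return rte_eth_dev_configure(port_id, 1, 1, &conf);
	}
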
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88bbd40c1027..ac4db117f5cd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -617,7 +617,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->vsi = vsi;
 	rxq->offloads = offloads;
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f4ae2fd6e123..2d7f6b1b2dca 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -24,22 +24,22 @@
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_QINQ_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		DEV_RX_OFFLOAD_CHECKSUM |		 \
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_RX_OFFLOAD_VLAN |		 \
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_VLAN |		 \
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
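For clarity, a hedged sketch (the helper name is hypothetical, not part
of the driver) of how the three masks above are typically consumed when
picking a Tx datapath:

	/* Hypothetical helper: any "no vector" flag forces the scalar path;
	 * offloads within IAVF_TX_VECTOR_OFFLOAD select the offload-capable
	 * vector path; otherwise the plain vector path is used.
	 */
	static inline int
	iavf_select_tx_path(uint64_t offloads)
	{
		if (offloads & IAVF_TX_NO_VECTOR_FLAGS)
			return -1; /* fall back to the scalar Tx path */
		if (offloads & IAVF_TX_VECTOR_OFFLOAD)
			return IAVF_VECTOR_OFFLOAD_PATH;
		return IAVF_VECTOR_PATH;
	}
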
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 72a4fcab04a5..b47c51b8ebe4 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -906,7 +906,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 		    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
@@ -958,7 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					(_mm256_castsi128_si256(raw_desc_bh0),
 					raw_desc_bh1, 1);
 
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 12375d3d80bd..b8f2f69f12fc 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1141,7 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1193,7 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 						(_mm256_castsi128_si256(raw_desc_bh0),
 						 raw_desc_bh1, 1);
 
-				if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+				if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 					/**
 					 * to shift the 32b RSS hash value to the
 					 * highest 32b of each 128b before mask
@@ -1721,7 +1721,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
 								rte_lcore_id());
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index edb54991e298..1de43b9b8ee2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -819,7 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index c9c01a14e349..7b7df5eebb6d 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -835,7 +835,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
 		/* set all lut items to default queue */
 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b8a537cb8556..a90e40964ec5 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -95,7 +95,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -576,7 +576,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -637,7 +637,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 
 	return 0;
@@ -652,8 +652,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	return 0;
 }
@@ -675,27 +675,27 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -925,42 +925,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (hw->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = hw->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -979,11 +979,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
 					udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
 					udp_tunnel->udp_port);
 		break;
@@ -1010,8 +1010,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
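A minimal caller-side sketch of the renamed tunnel type enumerators
(the port number 4789 is the IANA VXLAN default and is only
illustrative; port_id is an assumption):

	#include <rte_ethdev.h>

	/* Sketch: register a UDP port as a VXLAN tunnel port, which lands
	 * in the RTE_ETH_TUNNEL_TYPE_VXLAN case handled above.
	 */
	static int
	add_vxlan_port(uint16_t port_id)
	{
		struct rte_eth_udp_tunnel tunnel = {
			.udp_port = 4789,
			.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
		};

		return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	}
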
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 44fb38dbe7b1..b9fcfc80ad9b 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -37,7 +37,7 @@ ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -45,7 +45,7 @@ ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -143,28 +143,28 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -246,9 +246,9 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		bool enable = !!(dev_conf->rxmode.offloads &
-				 DEV_RX_OFFLOAD_VLAN_STRIP);
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (enable && repr->outer_vlan_info.port_vlan_ena) {
 			PMD_DRV_LOG(ERR,
@@ -345,7 +345,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 	if (!ice_dcf_vlan_offload_ena(repr))
 		return -ENOTSUP;
 
-	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer VLAN in QinQ\n");
 		return -EINVAL;
@@ -375,7 +375,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 
 	if (repr->outer_vlan_info.stripping_ena) {
 		err = ice_dcf_vf_repr_vlan_offload_set(dev,
-						       ETH_VLAN_STRIP_MASK);
+						       RTE_ETH_VLAN_STRIP_MASK);
 		if (err) {
 			PMD_DRV_LOG(ERR,
 				    "Failed to reset VLAN stripping : %d\n",
@@ -449,7 +449,7 @@ ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
 	int err;
 
 	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
-					       ETH_VLAN_STRIP_MASK);
+					       RTE_ETH_VLAN_STRIP_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
 		return err;
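The application-side counterpart to the VLAN mask handling above, as a
hedged sketch (helper name hypothetical, port_id assumed valid):

	#include <rte_ethdev.h>

	/* Sketch: turn on VLAN stripping and filtering at runtime. The
	 * ethdev layer translates the changed bits into a mask such as
	 * RTE_ETH_VLAN_STRIP_MASK before calling into the PMD, which is
	 * how the vlan_offload_set callbacks above are reached.
	 */
	static int
	enable_vlan_offloads(uint16_t port_id)
	{
		int offload = rte_eth_dev_get_vlan_offload(port_id);

		if (offload < 0)
			return offload;

		offload |= RTE_ETH_VLAN_STRIP_OFFLOAD |
			   RTE_ETH_VLAN_FILTER_OFFLOAD;
		return rte_eth_dev_set_vlan_offload(port_id, offload);
	}
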
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 2e7273cd1e93..fe546cf5159d 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1480,9 +1480,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	TAILQ_INIT(&vsi->mac_list);
 	TAILQ_INIT(&vsi->vlan_list);
 
-	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+	/* Keep in sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
 			hw->func_caps.common_cap.rss_table_size;
 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -2986,14 +2986,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	int ret;
 
 #define ICE_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
 	if (ret)
@@ -3003,7 +3003,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	cfg.symm = 0;
 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 	/* Configure RSS for IPv4 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3013,7 +3013,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for IPv6 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3023,7 +3023,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3034,7 +3034,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3045,7 +3045,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3056,7 +3056,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3067,7 +3067,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3078,7 +3078,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3088,7 +3088,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3098,7 +3098,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3108,7 +3108,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3118,7 +3118,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3128,7 +3128,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3138,7 +3138,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3281,8 +3281,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_rx_queues) {
 		ret = ice_init_rss(pf);
@@ -3562,8 +3562,8 @@ ice_dev_start(struct rte_eth_dev *dev)
 	ice_set_rx_function(dev);
 	ice_set_tx_function(dev);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = ice_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3675,40 +3675,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->flow_type_rss_offloads = 0;
 
 	if (!is_safe_mode) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM |
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_QINQ_STRIP |
-			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH |
-			DEV_RX_OFFLOAD_TIMESTAMP;
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH |
+			RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_QINQ_INSERT |
-			DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM |
-			DEV_TX_OFFLOAD_SCTP_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
 	dev_info->rx_queue_offload_capa = 0;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3747,24 +3747,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_align = ICE_ALIGN_RING_DESC,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			       ETH_LINK_SPEED_100M |
-			       ETH_LINK_SPEED_1G |
-			       ETH_LINK_SPEED_2_5G |
-			       ETH_LINK_SPEED_5G |
-			       ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_20G |
-			       ETH_LINK_SPEED_25G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			       RTE_ETH_LINK_SPEED_100M |
+			       RTE_ETH_LINK_SPEED_1G |
+			       RTE_ETH_LINK_SPEED_2_5G |
+			       RTE_ETH_LINK_SPEED_5G |
+			       RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_20G |
+			       RTE_ETH_LINK_SPEED_25G;
 
 	phy_type_low = hw->port_info->phy.phy_type_low;
 	phy_type_high = hw->port_info->phy.phy_type_high;
 
 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3829,8 +3829,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
 					      &link_status, NULL);
 		if (status != ICE_SUCCESS) {
-			link.link_speed = ETH_SPEED_NUM_100M;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_100M;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			goto out;
 		}
@@ -3846,55 +3846,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		goto out;
 
 	/* Full-duplex operation at all supported speeds */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case ICE_AQ_LINK_SPEED_10MB:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case ICE_AQ_LINK_SPEED_100MB:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case ICE_AQ_LINK_SPEED_1000MB:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case ICE_AQ_LINK_SPEED_2500MB:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_5GB:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_10GB:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case ICE_AQ_LINK_SPEED_20GB:
-		link.link_speed = ETH_SPEED_NUM_20G;
+		link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case ICE_AQ_LINK_SPEED_25GB:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case ICE_AQ_LINK_SPEED_40GB:
-		link.link_speed = ETH_SPEED_NUM_40G;
+		link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case ICE_AQ_LINK_SPEED_50GB:
-		link.link_speed = ETH_SPEED_NUM_50G;
+		link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case ICE_AQ_LINK_SPEED_100GB:
-		link.link_speed = ETH_SPEED_NUM_100G;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case ICE_AQ_LINK_SPEED_UNKNOWN:
 		PMD_DRV_LOG(ERR, "Unknown link speed");
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "None link speed");
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 
 out:
 	ice_atomic_write_link_status(dev, &link);
@@ -4370,15 +4370,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ice_vsi_config_vlan_filter(vsi, true);
 		else
 			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ice_vsi_config_vlan_stripping(vsi, true);
 		else
 			ice_vsi_config_vlan_stripping(vsi, false);
@@ -4493,8 +4493,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4543,8 +4543,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -5453,7 +5453,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
 		break;
 	default:
@@ -5477,7 +5477,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
@@ -5498,7 +5498,7 @@ ice_timesync_enable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_TIMESTAMP)) {
+	    RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
 		return -1;
 	}
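A sketch of the application-side RETA update that drives the idx/shift
decomposition shown in ice_rss_reta_update() above (reta_size is
assumed to be at most 512 and a multiple of RTE_ETH_RETA_GROUP_SIZE):

	#include <string.h>
	#include <rte_ethdev.h>

	/* Sketch: spread reta_size table entries round-robin across
	 * nb_queues Rx queues, using the same group indexing as the driver.
	 */
	static int
	spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
	{
		struct rte_eth_rss_reta_entry64
			reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta_conf, 0, sizeof(reta_conf));
		for (i = 0; i < reta_size; i++) {
			uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

			reta_conf[idx].mask |= 1ULL << shift;
			reta_conf[idx].reta[shift] = i % nb_queues;
		}

		return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
	}
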
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 5845f44c860c..ff9bef17760b 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -116,19 +116,19 @@
 		       ICE_FLAG_VF_MAC_BY_PF)
 
 #define ICE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /**
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 20a3204fab7e..35eff8b17d28 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -39,27 +39,27 @@
 #define ICE_IPV4_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
 #define ICE_IPV6_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE32	| \
 				 RTE_ETH_RSS_L3_PRE48	| \
 				 RTE_ETH_RSS_L3_PRE64)
@@ -373,87 +373,87 @@ struct ice_rss_hash_cfg eth_tmplt = {
 };
 
 /* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4		(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define ICE_RSS_TYPE_ETH_IPV4		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_UDP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_TCP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_SCTP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV4		ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV4		RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 /* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6		(ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(ETH_RSS_ETH | ETH_RSS_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_ETH_IPV6_UDP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_TCP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_SCTP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV6		ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV6		RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* VLAN IPV4 */
 #define ICE_RSS_TYPE_VLAN_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV4)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_VLAN_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_SCTP	(ICE_RSS_TYPE_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define ICE_RSS_TYPE_VLAN_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_FRAG	(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_VLAN_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_SCTP	(ICE_RSS_TYPE_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 
 /* GTPU IPv4 */
 #define ICE_RSS_TYPE_GTPU_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define ICE_RSS_TYPE_GTPU_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 
 /* PPPOE */
-#define ICE_RSS_TYPE_PPPOE		(ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
 
 /* PPPOE IPv4 */
 #define ICE_RSS_TYPE_PPPOE_IPV4		(ICE_RSS_TYPE_IPV4 | \
@@ -472,17 +472,17 @@ struct ice_rss_hash_cfg eth_tmplt = {
 					 ICE_RSS_TYPE_PPPOE)
 
 /* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /* MAC */
-#define ICE_RSS_TYPE_ETH		ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH		RTE_ETH_RSS_ETH
 
 /**
  * Supported pattern for hash.
@@ -647,86 +647,86 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
-		if (!(rss_type & ETH_RSS_ETH))
+		if (!(rss_type & RTE_ETH_RSS_ETH))
 			*hash_flds &= ~ICE_FLOW_HASH_ETH;
-		if (rss_type & ETH_RSS_L2_SRC_ONLY)
+		if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
-		else if (rss_type & ETH_RSS_L2_DST_ONLY)
+		else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
 		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
-		if (rss_type & ETH_RSS_ETH)
+		if (rss_type & RTE_ETH_RSS_ETH)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
-		if (rss_type & ETH_RSS_C_VLAN)
+		if (rss_type & RTE_ETH_RSS_C_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
-		else if (rss_type & ETH_RSS_S_VLAN)
+		else if (rss_type & RTE_ETH_RSS_S_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
-		if (!(rss_type & ETH_RSS_PPPOE))
+		if (!(rss_type & RTE_ETH_RSS_PPPOE))
 			*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 		if (rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		    ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV4) {
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 				*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
 				*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 			}
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		}
 
-		if (rss_type & ETH_RSS_IPV4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 		if (rss_type &
-		   (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+		   (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		}
 
 		if (rss_type & RTE_ETH_RSS_L3_PRE32) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
 			} else {
@@ -735,10 +735,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE48) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
 			} else {
@@ -747,10 +747,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE64) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
 			} else {
@@ -762,81 +762,81 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV6_UDP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV6_TCP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_SCTP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
-		if (!(rss_type & ETH_RSS_L2TPV3))
+		if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 			*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
-		if (!(rss_type & ETH_RSS_ESP))
+		if (!(rss_type & RTE_ETH_RSS_ESP))
 			*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
-		if (!(rss_type & ETH_RSS_AH))
+		if (!(rss_type & RTE_ETH_RSS_AH))
 			*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
-		if (!(rss_type & ETH_RSS_PFCP))
+		if (!(rss_type & RTE_ETH_RSS_PFCP))
 			*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
 	}
 }
@@ -870,7 +870,7 @@ ice_refine_hash_cfg_gtpu(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
@@ -892,10 +892,10 @@ static void ice_refine_hash_cfg(struct ice_rss_hash_cfg *hash_cfg,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -907,9 +907,9 @@ struct rss_attr_type {
 };
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* currently, IPv6 prefix RSS only supports up to 64 bits */
 	{RTE_ETH_RSS_L3_PRE32,				VALID_RSS_IPV6},
 	{RTE_ETH_RSS_L3_PRE48,				VALID_RSS_IPV6},
@@ -928,16 +928,16 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
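For context, the refined checks above are driven purely by the renamed
RTE_ETH_RSS_* request bits. A minimal application-side sketch (the port id,
key, and chosen type combination are assumptions, not part of this patch)
of asking for an L4-source-only TCP/IPv4 hash with the new names:

#include <rte_ethdev.h>

/* Hypothetical helper: request an IPv4/TCP hash computed over the L4
 * source port only, using the namespaced flags introduced here. */
static int
setup_rss(uint16_t port_id, uint8_t *key, uint8_t key_len)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = key_len,
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_L4_SRC_ONLY,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
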
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9f5..8406240d7209 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -303,7 +303,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 		}
 	}
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		/* Register mbuf field and flag for Rx timestamp */
 		err = rte_mbuf_dyn_rx_timestamp_register(
 				&ice_timestamp_dynfield_offset,
@@ -367,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -1117,7 +1117,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1624,7 +1624,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-			if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 				ts_ns = ice_tstamp_convert_32b_64b(hw,
 					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
 				if (ice_timestamp_dynflag > 0) {
@@ -1942,7 +1942,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2373,7 +2373,7 @@ ice_recv_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2889,7 +2889,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
 	for (i = 0; i < txq->tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
 			txep->mbuf = NULL;
@@ -3365,7 +3365,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		(txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
 
 	if (ad->tx_simple_allowed)
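
The Rx timestamp hunks above only register the dynamic mbuf field when
RTE_ETH_RX_OFFLOAD_TIMESTAMP is requested. A hedged sketch of the consuming
side, assuming the offset and flag were obtained from the same
rte_mbuf_dyn_rx_timestamp_register() call used above (the helper name is
made up):

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int ts_offset;    /* from rte_mbuf_dyn_rx_timestamp_register() */
static uint64_t ts_flag; /* likewise */

/* Hypothetical reader: return the Rx timestamp if the PMD set one. */
static uint64_t
mbuf_rx_timestamp(const struct rte_mbuf *m)
{
	if (!(m->ol_flags & ts_flag))
		return 0;
	return *RTE_MBUF_DYNFIELD(m, ts_offset, rte_mbuf_timestamp_t *);
}
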
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 9725ac018043..8c870354619e 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -473,7 +473,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 5bba9887d296..6d2038975830 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -584,7 +584,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -994,7 +994,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 5b5250565e35..a04b6fee560a 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -248,23 +248,23 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 }
 
 #define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define ICE_RX_VECTOR_OFFLOAD (				\
-		DEV_RX_OFFLOAD_CHECKSUM |		\
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_RX_OFFLOAD_VLAN |			\
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_RX_OFFLOAD_VLAN |			\
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define ICE_VECTOR_PATH		0
 #define ICE_VECTOR_OFFLOAD_PATH	1
@@ -287,7 +287,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		return -1;
 
 	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
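
Given the masks above, whether the vector paths stay usable is decided
entirely by the offloads the application requests. As a sketch (port,
queue, and descriptor count are assumptions), keeping a Tx queue on the
simple/vector path means asking for nothing beyond fast mbuf freeing:

#include <rte_ethdev.h>

/* Hypothetical setup: request only MBUF_FAST_FREE on a Tx queue so that
 * none of the ICE_TX_NO_VECTOR_FLAGS offloads disqualify the vector path. */
static int
setup_simple_txq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
	struct rte_eth_txconf txconf = {
		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
			rte_eth_dev_socket_id(port_id), &txconf);
}
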
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 653bd28b417c..117494131f32 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -479,7 +479,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 2a1ed90b641b..7ce80a442b35 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -307,8 +307,8 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		rx_mq_mode != ETH_MQ_RX_RSS) {
+	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 		/* RSS together with VMDq is not supported */
 		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				rx_mq_mode);
@@ -318,7 +318,7 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 	/* To not break software that sets an invalid mode, only display
 	 * a warning if an invalid mode is used.
 	 */
-	if (tx_mq_mode != ETH_MQ_TX_NONE)
+	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
 		PMD_INIT_LOG(WARNING,
 			"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
 			tx_mq_mode);
@@ -334,8 +334,8 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	ret  = igc_check_mq_mode(dev);
 	if (ret != 0)
@@ -473,12 +473,12 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 		if (speed == SPEED_2500) {
 			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
@@ -490,9 +490,9 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		}
 	} else {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -525,7 +525,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
 				" Port %d: Link Up - speed %u Mbps - %s",
 				dev->data->port_id,
 				(unsigned int)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				"full-duplex" : "half-duplex");
 		else
 			PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -972,18 +972,18 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	/* VLAN Offload Settings */
 	eth_igc_vlan_offload_set(dev,
-		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+		RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
 		hw->mac.autoneg = 1;
 	} else {
 		int num_speeds = 0;
 
-		if (*speeds & ETH_LINK_SPEED_FIXED) {
+		if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
 			PMD_DRV_LOG(ERR,
 				    "Force speed mode currently not supported");
 			igc_dev_clear_queues(dev);
@@ -993,33 +993,33 @@ eth_igc_start(struct rte_eth_dev *dev)
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.autoneg = 1;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_2_5G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
 			num_speeds++;
 		}
@@ -1482,14 +1482,14 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
-	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_vmdq_pools = 0;
 
 	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1515,9 +1515,9 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2141,13 +2141,13 @@ eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2179,16 +2179,16 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->fc.requested_mode = igc_fc_none;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->fc.requested_mode = igc_fc_rx_pause;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->fc.requested_mode = igc_fc_tx_pause;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->fc.requested_mode = igc_fc_full;
 		break;
 	default:
@@ -2234,29 +2234,29 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* set redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta, reg;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* skip if there is no need to update the register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* check mask whether need to read the register value first */
@@ -2290,29 +2290,29 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* read redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* skip if there is no need to read the register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* read register and get the queue index */
@@ -2369,23 +2369,23 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_hf = 0;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 
 	rss_conf->rss_hf |= rss_hf;
 	return 0;
@@ -2514,22 +2514,22 @@ eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igc_vlan_hw_strip_enable(dev);
 		else
 			igc_vlan_hw_strip_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igc_vlan_hw_filter_enable(dev);
 		else
 			igc_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			return igc_vlan_hw_extend_enable(dev);
 		else
 			return igc_vlan_hw_extend_disable(dev);
@@ -2547,7 +2547,7 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 	uint32_t reg_val;
 
 	/* only the outer TPID of a double VLAN can be configured */
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg_val = IGC_READ_REG(hw, IGC_VET);
 		reg_val = (reg_val & (~IGC_VET_EXT)) |
 			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
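
The reta_update/reta_query loops earlier in this file walk the table in
RTE_ETH_RETA_GROUP_SIZE chunks. A minimal caller-side sketch, assuming a
hypothetical port with four Rx queues and the 128-entry table this
hardware exposes:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical: spread a 128-entry redirection table over 4 Rx queues,
 * i.e. 128 / RTE_ETH_RETA_GROUP_SIZE = 2 groups of 64 entries each. */
static int
spread_reta(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64
		reta[RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= UINT64_C(1) << shift;
		reta[idx].reta[shift] = i % 4; /* target queue */
	}

	return rte_eth_dev_rss_reta_update(port_id, reta,
			RTE_ETH_RSS_RETA_SIZE_128);
}
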
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index 5e6c2ff30157..f56cad79e939 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -66,37 +66,37 @@ extern "C" {
 #define IGC_TX_MAX_MTU_SEG	UINT8_MAX
 
 #define IGC_RX_OFFLOAD_ALL	(    \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_VLAN_FILTER | \
-	DEV_RX_OFFLOAD_VLAN_EXTEND | \
-	DEV_RX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_RX_OFFLOAD_UDP_CKSUM   | \
-	DEV_RX_OFFLOAD_TCP_CKSUM   | \
-	DEV_RX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_RX_OFFLOAD_KEEP_CRC    | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
-	DEV_TX_OFFLOAD_VLAN_INSERT | \
-	DEV_TX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_TX_OFFLOAD_UDP_CKSUM   | \
-	DEV_TX_OFFLOAD_TCP_CKSUM   | \
-	DEV_TX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_TX_OFFLOAD_TCP_TSO     | \
-	DEV_TX_OFFLOAD_UDP_TSO	   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO     | \
+	RTE_ETH_TX_OFFLOAD_UDP_TSO	   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define IGC_RSS_OFFLOAD_ALL	(    \
-	ETH_RSS_IPV4               | \
-	ETH_RSS_NONFRAG_IPV4_TCP   | \
-	ETH_RSS_NONFRAG_IPV4_UDP   | \
-	ETH_RSS_IPV6               | \
-	ETH_RSS_NONFRAG_IPV6_TCP   | \
-	ETH_RSS_NONFRAG_IPV6_UDP   | \
-	ETH_RSS_IPV6_EX            | \
-	ETH_RSS_IPV6_TCP_EX        | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4               | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP   | \
+	RTE_ETH_RSS_IPV6               | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP   | \
+	RTE_ETH_RSS_IPV6_EX            | \
+	RTE_ETH_RSS_IPV6_TCP_EX        | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IGC_MAX_ETQF_FILTERS		3	/* etqf(3) is used for 1588 */
 #define IGC_ETQF_FILTER_1588		3
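
Masks like IGC_RX_OFFLOAD_ALL above are what ends up reported through
rte_eth_dev_info_get(). A small sketch (the helper name and the wanted set
are assumptions) of validating a requested offload set against what a port
advertises:

#include <errno.h>
#include <rte_ethdev.h>

/* Hypothetical check: ensure the Rx offloads we intend to enable are a
 * subset of what the port reports in rx_offload_capa. */
static int
check_rx_offloads(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	if ((wanted & dev_info.rx_offload_capa) != wanted)
		return -ENOTSUP;
	return 0;
}
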
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index 56132e8c6cd6..1d34ae2e1b15 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -127,7 +127,7 @@ struct igc_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /** Offload features */
@@ -209,7 +209,7 @@ struct igc_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 static inline uint64_t
@@ -847,23 +847,23 @@ igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
 }
@@ -1037,10 +1037,10 @@ igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		igc_rss_configure(dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/*
 		 * configure the RSS registers as follows,
 		 * then disable the RSS logic
@@ -1111,7 +1111,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+		rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				RTE_ETHER_CRC_LEN : 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -1177,7 +1177,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	if (dev->data->scattered_rx) {
@@ -1221,20 +1221,20 @@ igc_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= IGC_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= IGC_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_IPOFL;
 
 	if (offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rxcsum |= IGC_RXCSUM_TUOFL;
-		offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
 	} else {
 		rxcsum &= ~IGC_RXCSUM_TUOFL;
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
 		rxcsum |= IGC_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_CRCOFL;
@@ -1242,7 +1242,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1279,12 +1279,12 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
 		dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			dvmolr |= IGC_DVMOLR_STRVLAN;
 		else
 			dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-		if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			dvmolr &= ~IGC_DVMOLR_STRCRC;
 		else
 			dvmolr |= IGC_DVMOLR_STRCRC;
@@ -2253,10 +2253,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 	reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
 	if (on) {
 		reg_val |= IGC_DVMOLR_STRVLAN;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
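
eth_igc_vlan_strip_queue_set() above appears to back the generic per-queue
strip control. A sketch of reaching it from an application (port and queue
ids are made up):

#include <rte_ethdev.h>

/* Hypothetical: turn VLAN stripping off for one Rx queue while leaving
 * the rest of the port untouched; the PMD then clears
 * RTE_ETH_RX_OFFLOAD_VLAN_STRIP in that queue's offloads. */
static int
quiet_queue_vlan_strip(uint16_t port_id, uint16_t rx_queue_id)
{
	return rte_eth_dev_set_vlan_strip_on_queue(port_id, rx_queue_id, 0);
}
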
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index f94a1fed0a38..c688c3735c06 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -280,37 +280,37 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(link));
 
 	if (adapter->idev.port_info->config.an_enable) {
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	}
 
 	if (!adapter->link_up ||
 	    !(lif->state & IONIC_LIF_F_UP)) {
 		/* Interface is down */
-		link.link_status = ETH_LINK_DOWN;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else {
 		/* Interface is up */
-		link.link_status = ETH_LINK_UP;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		switch (adapter->link_speed) {
 		case  10000:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case  25000:
-			link.link_speed = ETH_SPEED_NUM_25G;
+			link.link_speed = RTE_ETH_SPEED_NUM_25G;
 			break;
 		case  40000:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case  50000:
-			link.link_speed = ETH_SPEED_NUM_50G;
+			link.link_speed = RTE_ETH_SPEED_NUM_50G;
 			break;
 		case 100000:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -387,17 +387,17 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
 
 	dev_info->speed_capa =
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	/*
 	 * Per-queue capabilities
 	 * RTE does not support disabling a feature on a queue if it is
 	 * enabled globally on the device. Thus the driver does not advertise
-	 * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+	 * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
 	 * though the driver would be otherwise capable of disabling it on
 	 * a per-queue basis.
 	 */
@@ -411,24 +411,24 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	 */
 
 	dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
 		0;
 
 	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
 		0;
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -463,9 +463,9 @@ ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		fc_conf->autoneg = 0;
 
 		if (idev->port_info->config.pause_type)
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -487,14 +487,14 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
 		break;
-	case RTE_FC_RX_PAUSE:
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		return -ENOTSUP;
 	}
 
@@ -545,12 +545,12 @@ ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = tbl_sz / RTE_RETA_GROUP_SIZE;
+	num = tbl_sz / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if (reta_conf[i].mask & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -585,12 +585,12 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-			&lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
-			RTE_RETA_GROUP_SIZE);
+			&lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+			RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -618,17 +618,17 @@ ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			IONIC_RSS_HASH_KEY_SIZE);
 
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -660,17 +660,17 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (!lif->rss_ind_tbl)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 			rss_types |= IONIC_RSS_TYPE_IPV4;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
-		if (rss_conf->rss_hf & ETH_RSS_IPV6)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 			rss_types |= IONIC_RSS_TYPE_IPV6;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
 
 		ionic_lif_rss_config(lif, rss_types, key, NULL);
@@ -842,15 +842,15 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
 static inline uint32_t
 ionic_parse_link_speeds(uint16_t link_speeds)
 {
-	if (link_speeds & ETH_LINK_SPEED_100G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 		return 100000;
-	else if (link_speeds & ETH_LINK_SPEED_50G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 		return 50000;
-	else if (link_speeds & ETH_LINK_SPEED_40G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		return 40000;
-	else if (link_speeds & ETH_LINK_SPEED_25G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		return 25000;
-	else if (link_speeds & ETH_LINK_SPEED_10G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		return 10000;
 	else
 		return 0;
@@ -874,12 +874,12 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	IONIC_PRINT_CALL();
 
 	allowed_speeds =
-		ETH_LINK_SPEED_FIXED |
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_FIXED |
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	if (dev_conf->link_speeds & ~allowed_speeds) {
 		IONIC_PRINT(ERR, "Invalid link setting");
@@ -896,7 +896,7 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure link */
-	an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+	an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 	ionic_dev_cmd_port_autoneg(idev, an_enable);
 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
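
ionic_dev_start() above treats the absence of RTE_ETH_LINK_SPEED_FIXED as
a request for autonegotiation. A caller-side sketch, assuming a
hypothetical port that should be forced to 25G:

#include <rte_ethdev.h>

/* Hypothetical: force a fixed 25G link; leaving link_speeds at
 * RTE_ETH_LINK_SPEED_AUTONEG (0) would keep autonegotiation enabled. */
static int
force_25g(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			       RTE_ETH_LINK_SPEED_25G,
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
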
diff --git a/drivers/net/ionic/ionic_ethdev.h b/drivers/net/ionic/ionic_ethdev.h
index 6cbcd0f825a3..652f28c97d57 100644
--- a/drivers/net/ionic/ionic_ethdev.h
+++ b/drivers/net/ionic/ionic_ethdev.h
@@ -8,12 +8,12 @@
 #include <rte_ethdev.h>
 
 #define IONIC_ETH_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
 	(eth_dev)->data->dev_private)
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index a1f9ce2d81cb..5e8fdf3893ad 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -1688,12 +1688,12 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
 
 	/*
 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
-	 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
 	 */
-	rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
 		else
 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
@@ -1733,19 +1733,19 @@ ionic_lif_configure(struct ionic_lif *lif)
 	/*
 	 * NB: While it is true that RSS_HASH is always enabled on ionic,
 	 *     setting this flag unconditionally causes problems in DTS.
-	 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	 */
 
 	/* RX per-port */
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 		lif->features |= IONIC_ETH_HW_RX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_RX_CSUM;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		lif->features |= IONIC_ETH_HW_RX_SG;
 		lif->eth_dev->data->scattered_rx = 1;
 	} else {
@@ -1754,30 +1754,30 @@ ionic_lif_configure(struct ionic_lif *lif)
 	}
 
 	/* Covers VLAN_STRIP */
-	ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
 
 	/* TX per-port */
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		lif->features |= IONIC_ETH_HW_TX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_CSUM;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
 	else
 		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		lif->features |= IONIC_ETH_HW_TX_SG;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_SG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		lif->features |= IONIC_ETH_HW_TSO;
 		lif->features |= IONIC_ETH_HW_TSO_IPV6;
 		lif->features |= IONIC_ETH_HW_TSO_ECN;
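
ionic_lif_configure() folds these per-port offload bits into LIF features.
A sketch of the application-side configuration that would enable the Tx
checksum and TSO features (the helper name and queue counts are
assumptions):

#include <rte_ethdev.h>

/* Hypothetical port configuration that maps to IONIC_ETH_HW_TX_CSUM and
 * the IONIC_ETH_HW_TSO* features above. */
static int
enable_tx_csum_tso(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.txmode = {
			.offloads = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				    RTE_ETH_TX_OFFLOAD_TCP_TSO,
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
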
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 4d16a39c6b6d..e3df7c56debe 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -203,11 +203,11 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 		txq->flags |= IONIC_QCQ_F_DEFERRED;
 
 	/* Convert the offload flags into queue flags */
-	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
-	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
-	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
 
 	eth_dev->data->tx_queues[tx_queue_id] = txq;
@@ -743,11 +743,11 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/*
 	 * Note: the interface does not currently support
-	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider RTE_ETHER_CRC_LEN
 	 * once the adapter is able to keep the CRC, and subtract
 	 * it from the length of all received packets:
 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
-	 *     DEV_RX_OFFLOAD_KEEP_CRC)
+	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 	 *   rxq->crc_len = RTE_ETHER_CRC_LEN;
 	 */
 
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 063a9c6a6f7f..17088585757f 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -50,11 +50,11 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->speed_capa =
 		(hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
-		ETH_LINK_SPEED_10G :
+		RTE_ETH_LINK_SPEED_10G :
 		((hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
-		ETH_LINK_SPEED_25G :
-		ETH_LINK_SPEED_AUTONEG);
+		RTE_ETH_LINK_SPEED_25G :
+		RTE_ETH_LINK_SPEED_AUTONEG);
 
 	dev_info->max_rx_queues  = 1;
 	dev_info->max_tx_queues  = 1;
@@ -67,30 +67,30 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	};
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 
 	dev_info->dev_capa =
@@ -2399,10 +2399,10 @@ ipn3ke_update_link(struct rte_rawdev *rawdev,
 				(uint64_t *)&link_speed);
 	switch (link_speed) {
 	case IFPGA_RAWDEV_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case IFPGA_RAWDEV_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
 		IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
@@ -2460,9 +2460,9 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2518,9 +2518,9 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 46c95425adfb..7fd2c539e002 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1857,7 +1857,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= IXGBE_DMATXCTL_GDV;
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (qinq) {
 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
@@ -1872,7 +1872,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    " by single VLAN");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (qinq) {
 			/* Only the high 16 bits are valid */
 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
@@ -1959,10 +1959,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -2083,7 +2083,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			ctrl |= IXGBE_VLNCTRL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -2100,7 +2100,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 				ctrl |= IXGBE_RXDCTL_VME;
 				on = TRUE;
 			} else {
@@ -2122,17 +2122,17 @@ ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct ixgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -2143,19 +2143,18 @@ ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		ixgbe_vlan_hw_strip_config(dev);
-	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2194,10 +2193,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -2221,18 +2220,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -2242,12 +2241,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if no mq mode is configured, use the default scheme */
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq-enabled mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -2256,12 +2255,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -2276,13 +2275,13 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdq+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2291,15 +2290,15 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2308,39 +2307,39 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -2349,7 +2348,7 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB) {
 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
 				PMD_INIT_LOG(ERR,
@@ -2373,8 +2372,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = ixgbe_check_mq_mode(dev);
@@ -2619,15 +2618,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -2704,17 +2703,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |  RTE_ETH_LINK_SPEED_5G |
+			RTE_ETH_LINK_SPEED_10G;
 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-			allowed_speeds = ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+			allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 		break;
 	default:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 	}
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
@@ -2728,7 +2727,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
@@ -2746,17 +2745,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
 		}
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= IXGBE_LINK_SPEED_100_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= IXGBE_LINK_SPEED_10_FULL;
 	}
 
@@ -3832,7 +3831,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB)
 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
 	}
@@ -3842,9 +3841,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
@@ -3883,21 +3882,21 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-		dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 
 	if (hw->mac.type == ixgbe_mac_X540 ||
 	    hw->mac.type == ixgbe_mac_X540_vf ||
 	    hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550_vf) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	}
 	if (hw->mac.type == ixgbe_mac_X550) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 	}
 
 	/* Driver-preferred Rx/Tx parameters */
@@ -3966,9 +3965,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -4211,11 +4210,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	u32 esdp_reg;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -4237,8 +4236,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
 	if (diag != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -4274,37 +4273,37 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case IXGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 
 	case IXGBE_LINK_SPEED_10_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 
 	case IXGBE_LINK_SPEED_100_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case IXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -4521,7 +4520,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -4740,13 +4739,13 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -5044,8 +5043,8 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5092,8 +5091,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5255,22 +5254,22 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -5330,8 +5329,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	ixgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -5568,10 +5567,10 @@ ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports the hw strip feature; others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
@@ -5702,12 +5701,12 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 		}
@@ -5721,15 +5720,15 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= IXGBE_VMOLR_AUPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= IXGBE_VMOLR_ROMPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= IXGBE_VMOLR_ROPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= IXGBE_VMOLR_BAM;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= IXGBE_VMOLR_MPE;
 
 	return new_val;
@@ -6724,15 +6723,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = IXGBE_INCVAL_1GB;
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -7143,16 +7142,16 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		return ETH_RSS_RETA_SIZE_512;
+		return RTE_ETH_RSS_RETA_SIZE_512;
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
-		return ETH_RSS_RETA_SIZE_64;
+		return RTE_ETH_RSS_RETA_SIZE_64;
 	case ixgbe_mac_X540_vf:
 	case ixgbe_mac_82599_vf:
 		return 0;
 	default:
-		return ETH_RSS_RETA_SIZE_128;
+		return RTE_ETH_RSS_RETA_SIZE_128;
 	}
 }
 
@@ -7162,10 +7161,10 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		if (reta_idx < ETH_RSS_RETA_SIZE_128)
+		if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
 			return IXGBE_RETA(reta_idx >> 2);
 		else
-			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+			return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
@@ -7221,7 +7220,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -7232,7 +7231,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled*/
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -7256,9 +7255,9 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled*/
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7271,7 +7270,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7524,7 +7523,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -7556,7 +7555,7 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -7653,12 +7652,12 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
 
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
@@ -7690,11 +7689,11 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
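
The RETA group indexing updated above is the same pattern an application uses
when filling struct rte_eth_rss_reta_entry64 before calling
rte_eth_dev_rss_reta_update(). A minimal sketch under the new namespace;
port_id, reta_size and nb_rx_queues are placeholders, <rte_ethdev.h> and
<string.h> are assumed included, and reta_size is assumed to be at most 512:

	static int
	example_reta_spread(uint16_t port_id, uint16_t reta_size,
			uint16_t nb_rx_queues)
	{
		/* 8 groups of 64 entries cover up to RTE_ETH_RSS_RETA_SIZE_512 */
		struct rte_eth_rss_reta_entry64 reta_conf[8];
		uint16_t i;

		memset(reta_conf, 0, sizeof(reta_conf));
		for (i = 0; i < reta_size; i++) {
			/* Same idx/shift arithmetic as the driver loops above */
			uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

			reta_conf[idx].mask |= 1ULL << shift;
			reta_conf[idx].reta[shift] = i % nb_rx_queues;
		}
		return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
	}
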
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 950fb2d2450c..876b670f2682 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -114,15 +114,15 @@
 #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE    0x0
 
 #define IXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf irq enable mask */
 #define IXGBE_VF_MAXMSIVECTOR           1
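
IXGBE_RSS_OFFLOAD_ALL above bounds what the port can hash on; on the
application side the renamed flags are requested through rss_hf. A minimal
configuration sketch; the selected bits must remain a subset of what the port
reports in dev_info.flow_type_rss_offloads, and rss_key is left NULL to keep
the driver default:

	static const struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IPV4 |
					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
					RTE_ETH_RSS_NONFRAG_IPV4_UDP,
			},
		},
	};
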
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 27a49bbce5e7..7894047829a8 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -90,9 +90,9 @@ static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 				 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
 			uint32_t fdircmd, uint32_t fdirhash,
@@ -163,20 +163,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
 {
 	*fdirctrl = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
 		break;
@@ -807,13 +807,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_128KB_HASH_MASK;
@@ -850,15 +850,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_128KB_HASH_MASK;
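
The pballoc setting above trades flow-director packet-buffer space for filter
count (64K gives 8k - 1 signature filters, and so on). A sketch of the
application side with the renamed type, assuming the legacy fdir_conf path in
rte_eth_conf is still used and that 127 stays the usual ixgbe drop queue:

	struct rte_eth_conf port_conf = { 0 };

	port_conf.fdir_conf.mode = RTE_ETH_FDIR_MODE_SIGNATURE;
	/* 8k - 1 signature filters, per configure_fdir_flags() above */
	port_conf.fdir_conf.pballoc = RTE_ETH_FDIR_PBALLOC_64K;
	port_conf.fdir_conf.drop_queue = 127;
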
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 27322ab9038a..bdc9d4796c02 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1259,7 +1259,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index e45c5501e6bf..944c9f23809e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -392,7 +392,7 @@ ixgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -400,7 +400,7 @@ ixgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -633,11 +633,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -657,7 +657,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
@@ -665,7 +665,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
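
The sanity checks above pin down what inline IPsec expects from the port
configuration. As a sketch of the application side, with port_conf assumed to
be the rte_eth_conf later passed to rte_eth_dev_configure():

	/* Enable inline crypto processing in both directions... */
	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
	/*
	 * ...and satisfy the checks above: RSC/LRO off and HW CRC strip on,
	 * i.e. neither RTE_ETH_RX_OFFLOAD_TCP_LRO nor
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC may be set.
	 */
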
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 295e5a39b245..9f1bd0a62ba4 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -104,15 +104,15 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -263,15 +263,15 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
 		gpie |= IXGBE_GPIE_VTMODE_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
 		gpie |= IXGBE_GPIE_VTMODE_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
 		gpie |= IXGBE_GPIE_VTMODE_16;
 		break;
@@ -674,29 +674,29 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
 		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b263dfe1d574..9e5716f935a2 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2592,26 +2592,26 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2780,7 +2780,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/*
@@ -3021,7 +3021,7 @@ ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (hw->mac.type != ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return offloads;
 }
@@ -3032,19 +3032,19 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t offloads;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_SCATTER |
-		   DEV_RX_OFFLOAD_RSS_HASH;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (ixgbe_is_vf(dev) == 0)
-		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	/*
 	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -3054,20 +3054,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	     hw->mac.type == ixgbe_mac_X540 ||
 	     hw->mac.type == ixgbe_mac_X550) &&
 	    !RTE_ETH_DEV_SRIOV(dev).active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -3122,7 +3122,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -3507,23 +3507,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
@@ -3605,23 +3605,23 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -3697,12 +3697,12 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		ixgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * RXPBSIZE
@@ -3727,7 +3727,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -3736,7 +3736,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	}
 
 	/* MRQC: enable vmdq and dcb */
-	mrqc = (num_pools == ETH_16_POOLS) ?
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
 		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
@@ -3752,7 +3752,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3776,7 +3776,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 16 or 32 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*
 	 * MPSAR - allow pools to read specific mac addresses
@@ -3858,7 +3858,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	if (hw->mac.type != ixgbe_mac_82598EB)
 		/*PF VF Transmit Enable*/
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
-			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/* Configure general DCB TX parameters */
 	ixgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3874,12 +3874,12 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3889,7 +3889,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3907,12 +3907,12 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3922,7 +3922,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3949,7 +3949,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3976,7 +3976,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -4145,7 +4145,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		if (hw->mac.type != ixgbe_mac_82598EB) {
 			config_dcb_rx = DCB_RX_CONFIG;
@@ -4158,8 +4158,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			ixgbe_vmdq_dcb_configure(dev);
 		}
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -4172,7 +4172,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -4183,7 +4183,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -4199,15 +4199,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
-			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -4257,9 +4257,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 		}
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		}
 	}
 	if (config_dcb_tx) {
 		/* Only support an equally distributed
@@ -4273,7 +4272,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
 		}
@@ -4309,7 +4308,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
@@ -4323,7 +4322,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = ixgbe_dcb_pfc_enabled;
 		}
 		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -4344,12 +4343,12 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -4405,7 +4404,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 64 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
 
 	/*
@@ -4526,11 +4525,11 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
 		break;
 
@@ -4551,17 +4550,17 @@ ixgbe_config_vf_default(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQEN);
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT4TCEN);
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT8TCEN);
 		break;
@@ -4588,21 +4587,21 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			ixgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable RSS mode. */
 			ixgbe_rss_disable(dev);
@@ -4613,18 +4612,18 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4658,7 +4657,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			ixgbe_vmdq_tx_hw_configure(hw);
 		else {
 			mtqc = IXGBE_MTQC_64Q_1PB;
@@ -4671,13 +4670,13 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
 				IXGBE_MTQC_8TC_8TQ;
 			break;
@@ -4885,7 +4884,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		rxq->rx_using_sse = rx_using_sse;
 #ifdef RTE_LIB_SECURITY
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 #endif
 	}
 }
@@ -4913,10 +4912,10 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4924,8 +4923,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4939,7 +4938,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 	else
 		rfctl |= IXGBE_RFCTL_RSC_DIS;
@@ -4948,7 +4947,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -5070,7 +5069,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -5107,7 +5106,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5116,7 +5115,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -5158,11 +5157,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -5177,7 +5176,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -5187,7 +5186,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5393,9 +5392,9 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 #ifdef RTE_LIB_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SECURITY) ||
+			RTE_ETH_RX_OFFLOAD_SECURITY) ||
 		(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY)) {
+			RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = ixgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -5683,7 +5682,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5732,7 +5731,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
@@ -5740,8 +5739,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index a1764f2b08af..668a5b9814f6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -133,7 +133,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_udp_csum_zero_err;
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -227,7 +227,7 @@ struct ixgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 005e60668a8b..cd34d4098785 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -277,7 +277,7 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index ae03ea6e9db3..ac8976062fa7 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -119,14 +119,14 @@ ixgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -375,10 +375,10 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -392,7 +392,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index 9fa75984fb31..bd528ff346c7 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -58,20 +58,20 @@ ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	/**< Maximum number of MAC addresses. */
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |	DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 	/**< Device RX offload capabilities. */
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	/**< Device TX offload capabilities. */
 
 	dev_info->speed_capa =
 		representor->pf_ethdev->data->dev_link.link_speed;
-	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	/**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 
 	dev_info->switch_info.name =
 		representor->pf_ethdev->device->name;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index cf089cd9aee5..9729f8575f53 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -303,10 +303,10 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
 	 */
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_16_POOLS;
+				  RTE_ETH_16_POOLS;
 	else
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_64_POOLS;
+				  RTE_ETH_64_POOLS;
 
 	for (q = 0; q < queues_per_pool; q++)
 		(*dev->dev_ops->vlan_strip_queue_set)(dev,
@@ -736,14 +736,14 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
 	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 	eth_conf = &dev->data->dev_conf;
 
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 90fc8160b1f8..eef6f6661c74 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -285,8 +285,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
 * @param rx_mask
 *    The RX mode mask, which is one or more of accepting Untagged Packets,
 *    packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-*    ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-*    ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+*    RTE_ETH_VMDQ_ACCEPT_UNTAG, RTE_ETH_VMDQ_ACCEPT_HASH_UC,
+*    RTE_ETH_VMDQ_ACCEPT_BROADCAST and RTE_ETH_VMDQ_ACCEPT_MULTICAST will be used
 *    in rx_mode.
 * @param on
 *    1 - Enable a VF RX mode.
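
For illustration, a minimal sketch of a caller passing the renamed VMDq
accept flags to this API (the port and VF numbers are assumed values and
error handling is elided):

#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

/* Accept untagged and broadcast frames on VF 0 of port 0, using the
 * RTE_ETH_VMDQ_ACCEPT_* names introduced by this patch.
 */
static int
enable_vf_rx_mode(void)
{
	uint16_t rx_mask = RTE_ETH_VMDQ_ACCEPT_UNTAG |
			   RTE_ETH_VMDQ_ACCEPT_BROADCAST;

	return rte_pmd_ixgbe_set_vf_rxmode(0, 0, rx_mask, 1);
}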
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index cb9f7c8e8200..c428caf44189 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,10 @@ struct pmd_internals {
 };
 
 static const struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 static int is_kni_initialized;
 
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 0fc3f0ab66a9..90ffe31b9fda 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -384,15 +384,15 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		break;
 	/* CN23xx 25G cards */
 	case PCI_SUBSYS_DEV_ID_CN2350_225:
 	case PCI_SUBSYS_DEV_ID_CN2360_225:
-		devinfo->speed_capa = ETH_LINK_SPEED_25G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		break;
 	default:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		lio_dev_err(lio_dev,
 			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
 		return -EINVAL;
@@ -406,27 +406,27 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	devinfo->max_mac_addrs = 1;
 
-	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_RX_OFFLOAD_UDP_CKSUM		|
-				    DEV_RX_OFFLOAD_TCP_CKSUM		|
-				    DEV_RX_OFFLOAD_VLAN_STRIP		|
-				    DEV_RX_OFFLOAD_RSS_HASH);
-	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_TX_OFFLOAD_UDP_CKSUM		|
-				    DEV_TX_OFFLOAD_TCP_CKSUM		|
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+	devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH);
+	devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
 
 	devinfo->rx_desc_lim = lio_rx_desc_lim;
 	devinfo->tx_desc_lim = lio_tx_desc_lim;
 
 	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
 	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
-	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
-					   ETH_RSS_NONFRAG_IPV4_TCP	|
-					   ETH_RSS_IPV6			|
-					   ETH_RSS_NONFRAG_IPV6_TCP	|
-					   ETH_RSS_IPV6_EX		|
-					   ETH_RSS_IPV6_TCP_EX);
+	devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4			|
+					   RTE_ETH_RSS_NONFRAG_IPV4_TCP	|
+					   RTE_ETH_RSS_IPV6			|
+					   RTE_ETH_RSS_NONFRAG_IPV6_TCP	|
+					   RTE_ETH_RSS_IPV6_EX		|
+					   RTE_ETH_RSS_IPV6_TCP_EX);
 	return 0;
 }
 
@@ -519,10 +519,10 @@ lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
 	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
 
-	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				rss_state->itable[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -562,12 +562,12 @@ lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
-		       RTE_RETA_GROUP_SIZE);
+		       &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE],
+		       RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -595,17 +595,17 @@ lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
 
 	if (rss_state->ip)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (rss_state->tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (rss_state->ipv6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (rss_state->ipv6_tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (rss_state->ipv6_ex)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (rss_state->ipv6_tcp_ex_hash)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -673,42 +673,42 @@ lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (rss_state->hash_disable)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 			hashinfo |= LIO_RSS_HASH_IPV4;
 			rss_state->ip = 1;
 		} else {
 			rss_state->ip = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
 			rss_state->tcp_hash = 1;
 		} else {
 			rss_state->tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {
 			hashinfo |= LIO_RSS_HASH_IPV6;
 			rss_state->ipv6 = 1;
 		} else {
 			rss_state->ipv6 = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
 			rss_state->ipv6_tcp_hash = 1;
 		} else {
 			rss_state->ipv6_tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {
 			hashinfo |= LIO_RSS_HASH_IPV6_EX;
 			rss_state->ipv6_ex = 1;
 		} else {
 			rss_state->ipv6_ex = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
 			rss_state->ipv6_tcp_ex_hash = 1;
 		} else {
@@ -757,7 +757,7 @@ lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -814,7 +814,7 @@ lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -912,10 +912,10 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	/* Initialize */
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	/* Return what we found */
 	if (lio_dev->linfo.link.s.link_up == 0) {
@@ -923,18 +923,18 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		return rte_eth_linkstatus_set(eth_dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP; /* Interface is up */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP; /* Interface is up */
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	switch (lio_dev->linfo.link.s.speed) {
 	case LIO_LINK_SPEED_10000:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case LIO_LINK_SPEED_25000:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	}
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -1086,8 +1086,8 @@ lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
 
 		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
 				  i % eth_dev->data->nb_rx_queues : 0);
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[conf_idx].reta[reta_idx] = q_idx;
 		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
 	}
@@ -1103,10 +1103,10 @@ lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rss_conf rss_conf;
 
 	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		lio_dev_rss_configure(eth_dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 	/* if mq_mode is none, disable rss mode. */
 	default:
 		memset(&rss_conf, 0, sizeof(rss_conf));
@@ -1484,7 +1484,7 @@ lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 1;
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -1505,11 +1505,11 @@ lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 0;
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
 		lio_dev->linfo.link.s.link_up = 1;
-		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		lio_dev_err(lio_dev, "Unable to set Link Down\n");
 		return -1;
 	}
@@ -1721,9 +1721,9 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Inform firmware about change in number of queues to use.
 	 * Disable IO queues and reset registers for re-configuration.
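
A minimal sketch of the application side that triggers this path --
requesting RSS at configure time with the renamed mq_mode and RSS macros
(the queue counts are assumed values):

#include <rte_ethdev.h>

/* Configure a port for RSS over IP and TCP flows. */
static int
configure_rss(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL, /* use the PMD default key */
				.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}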
diff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c
index 364e818d65c1..8533e39f6957 100644
--- a/drivers/net/memif/memif_socket.c
+++ b/drivers/net/memif/memif_socket.c
@@ -525,7 +525,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
 
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 980150293e86..9deb7a5f1360 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -55,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION		"memif_mp_send_region"
@@ -199,7 +199,7 @@ memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *de
 	dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1219,7 +1219,7 @@ memif_connect(struct rte_eth_dev *dev)
 
 		pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 		pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	}
 	MIF_LOG(INFO, "Connected.");
 	return 0;
@@ -1381,10 +1381,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		proc_private = dev->process_private;
-		if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+		if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
 				proc_private->regions_num == 0) {
 			memif_mp_request_regions(dev);
-		} else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+		} else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
 				proc_private->regions_num > 0) {
 			memif_free_regions(dev);
 		}
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 783ff94dce8d..d606ec8ca76d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -657,11 +657,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->if_index = priv->if_index;
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
 	info->speed_capa =
-			ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_20G |
-			ETH_LINK_SPEED_40G |
-			ETH_LINK_SPEED_56G;
+			RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_20G |
+			RTE_ETH_LINK_SPEED_40G |
+			RTE_ETH_LINK_SPEED_56G;
 	info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
 
 	return 0;
@@ -821,13 +821,13 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		dev_link.link_speed = link_speed;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	dev->data->dev_link = dev_link;
 	return 0;
 }
@@ -863,13 +863,13 @@ mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	ret = 0;
 out:
 	MLX4_ASSERT(ret >= 0);
@@ -899,13 +899,13 @@ mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
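
For reference, the application-facing side of the same rename -- a
minimal sketch enabling full pause frames with the RTE_ETH_FC_* enum:

#include <rte_ethdev.h>

/* Switch a port to full (Rx + Tx) flow control. */
static int
enable_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}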
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 71ea91b3fb82..2e1b6c87e983 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -109,21 +109,21 @@ mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
 	};
 	static const uint64_t dpdk[] = {
 		[INNER] = 0,
-		[IPV4] = ETH_RSS_IPV4,
-		[IPV4_1] = ETH_RSS_FRAG_IPV4,
-		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IPV6] = ETH_RSS_IPV6,
-		[IPV6_1] = ETH_RSS_FRAG_IPV6,
-		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IPV6_3] = ETH_RSS_IPV6_EX,
+		[IPV4] = RTE_ETH_RSS_IPV4,
+		[IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
+		[IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IPV6] = RTE_ETH_RSS_IPV6,
+		[IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
+		[IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IPV6_3] = RTE_ETH_RSS_IPV6_EX,
 		[TCP] = 0,
 		[UDP] = 0,
-		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
-		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
-		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
-		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
-		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
-		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+		[IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+		[IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+		[IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+		[IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
+		[IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+		[IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
 	};
 	static const uint64_t verbs[RTE_DIM(dpdk)] = {
 		[INNER] = IBV_RX_HASH_INNER,
@@ -1283,7 +1283,7 @@ mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1358,7 +1358,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 	struct rte_ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
 		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
+		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 		!ETH_DEV(priv)->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index d56009c41845..2aab0f60a7b5 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -118,7 +118,7 @@ mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
 static void
 mlx4_link_status_alarm(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	MLX4_ASSERT(priv->intr_alarm == 1);
@@ -183,7 +183,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
 	};
 	uint32_t caught[RTE_DIM(type)] = { 0 };
 	struct ibv_async_event event;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	unsigned int i;
 
@@ -280,7 +280,7 @@ mlx4_intr_uninstall(struct mlx4_priv *priv)
 int
 mlx4_intr_install(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	int rc;
 
@@ -386,7 +386,7 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx4_rxq_intr_enable(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index ee2d2b75e59a..781ee256df71 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -682,12 +682,12 @@ mlx4_rxq_detach(struct rxq *rxq)
 uint64_t
 mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
-			    DEV_RX_OFFLOAD_KEEP_CRC |
-			    DEV_RX_OFFLOAD_RSS_HASH;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+			    RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (priv->hw_csum)
-		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return offloads;
 }
 
@@ -703,7 +703,7 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 uint64_t
 mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	(void)priv;
 	return offloads;
@@ -785,7 +785,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
 	crc_present = 0;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (priv->hw_fcs_strip) {
 			crc_present = 1;
 		} else {
@@ -816,9 +816,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -832,7 +832,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 	if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
 		uint32_t sges_n;
 
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 7d8c4f2a2223..0db2e55befd3 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -273,20 +273,20 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 uint64_t
 mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+	uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (priv->hw_csum) {
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	}
 	if (priv->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (priv->hw_csum_l2tun) {
-		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (priv->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	}
 	return offloads;
 }
@@ -394,12 +394,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					   DEV_TX_OFFLOAD_UDP_CKSUM |
-					   DEV_TX_OFFLOAD_TCP_CKSUM)),
+			(offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
 			      (offloads &
-			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+			       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
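
A usage sketch of the matching capability check on the application side,
with the renamed RTE_ETH_TX_OFFLOAD_* bits (the offload selection policy
here is illustrative):

#include <rte_ethdev.h>

/* Pick TSO and multi-segment Tx only when the PMD advertises them. */
static uint64_t
select_tx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint64_t offloads = 0;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	return offloads;
}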
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index f34133e2c641..79e27fe2d668 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -439,24 +439,24 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	else
 		dev_link.link_speed = link_speed;
 	priv->link_speed_capa = 0;
 	if (edata.supported & (SUPPORTED_1000baseT_Full |
 			       SUPPORTED_1000baseKX_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (edata.supported & SUPPORTED_10000baseKR_Full)
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
 			       SUPPORTED_40000baseCR4_Full |
 			       SUPPORTED_40000baseSR4_Full |
 			       SUPPORTED_40000baseLR4_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -545,45 +545,45 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		return ret;
 	}
 	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
-				ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+				RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
 	sc = ecmd->link_mode_masks[0] |
 		((uint64_t)ecmd->link_mode_masks[1] << 32);
 	priv->link_speed_capa = 0;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	sc = ecmd->link_mode_masks[2] |
 		((uint64_t)ecmd->link_mode_masks[3] << 32);
@@ -591,11 +591,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		  MLX5_BITSHIFT
 		       (ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -677,13 +677,13 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -709,14 +709,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a823d26bebf9..d207ec053e07 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1350,8 +1350,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
@@ -1634,7 +1634,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/*
 	 * If HW has bug working with tunnel packet decapsulation and
 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
 	 */
 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
 		config->hw_fcs_strip = 0;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index e28cc461b914..7727dfb4196c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1488,10 +1488,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
 			 struct rte_eth_udp_tunnel *udp_tunnel)
 {
 	MLX5_ASSERT(udp_tunnel != NULL);
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
 	    udp_tunnel->udp_port == 4789)
 		return 0;
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
 	    udp_tunnel->udp_port == 4790)
 		return 0;
 	return -ENOTSUP;
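
The caller side of this check, as a minimal sketch with the renamed
tunnel type enum (4789 is the standard VXLAN port the code above
accepts):

#include <rte_ethdev.h>

/* Register the standard VXLAN UDP port on a device. */
static int
add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}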
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a15f86616d49..ea17a86f4955 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1217,7 +1217,7 @@ TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
 struct mlx5_flow_rss_desc {
 	uint32_t level;
 	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index fe86bb40d351..12ddf4c7ff28 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -90,11 +90,11 @@
 #define MLX5_VPMD_DESCS_PER_LOOP      4
 
 /* Mask of RSS on source only or destination only. */
-#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
-			       ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+			       RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
 			    MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 82e2284d9866..f2b78c3cc69e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -91,7 +91,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	if ((dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
+			RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
 			rte_mbuf_dyn_tx_timestamp_register(NULL, NULL) != 0) {
 		DRV_LOG(ERR, "port %u cannot register Tx timestamp field/flag",
 			dev->data->port_id);
@@ -225,8 +225,8 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->default_txportconf.ring_size = 256;
 	info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
 	info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
-	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
-		(priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
+	if ((priv->link_speed_capa & RTE_ETH_LINK_SPEED_200G) |
+		(priv->link_speed_capa & RTE_ETH_LINK_SPEED_100G)) {
 		info->default_rxportconf.nb_queues = 16;
 		info->default_txportconf.nb_queues = 16;
 		if (dev->data->nb_rx_queues > 2 ||
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c914a7120cca..5dc0400e8bdc 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
 	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
 	 */
 	uint64_t node_flags;
 	/**<
@@ -292,7 +292,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -546,8 +546,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -555,11 +555,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -570,8 +570,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_GRE,
 			 MLX5_EXPANSION_NVGRE),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -579,11 +579,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_VXLAN] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -636,32 +636,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
 						  MLX5_EXPANSION_IPV4_TCP),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_IPV4_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
 						  MLX5_EXPANSION_IPV6_TCP,
 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_IPV6_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
@@ -1072,7 +1072,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1625,14 +1625,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
-	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-	    !(rss->types & ETH_RSS_IP))
+	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & RTE_ETH_RSS_IP))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L3 partial RSS requested but L3 RSS"
 					  " type not specified");
-	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
@@ -6388,8 +6388,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 * mlx5_flow_hashfields_adjust() in advance.
 		 */
 		rss_desc->level = rss->level;
-		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	}
 	flow->dev_handles = 0;
 	if (rss && rss->types) {
@@ -7013,7 +7013,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	if (!priv->reta_idx_n || !priv->rxqs_n) {
 		return 0;
 	}
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
@@ -8681,7 +8681,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 				(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 			ctx->action_rss.types = 0;
 		for (i = 0; i != priv->reta_idx_n; ++i)
 			ctx->queue[i] = (*priv->reta_idx)[i];
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5c68d4f7d742..ff85c1c013a5 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -328,18 +328,18 @@ enum mlx5_feature_name {
 
 /* Valid layer type for IPV4 RSS. */
 #define MLX5_IPV4_LAYER_TYPES \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-	 ETH_RSS_NONFRAG_IPV4_OTHER)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
 
 /* IBV hash source bits  for IPV4. */
 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 
 /* Valid layer type for IPV6 RSS. */
 #define MLX5_IPV6_LAYER_TYPES \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
-	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX  | ETH_RSS_IPV6_TCP_EX | \
-	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
+	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 /* IBV hash source bits  for IPV6. */
 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index e31d4d846825..759fe57f19d6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10837,9 +10837,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
 			else
 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10847,9 +10847,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
 			else
 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10863,11 +10863,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		return;
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-		if (rss_types & ETH_RSS_UDP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_UDP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_UDP;
 			else
@@ -10875,11 +10875,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		}
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-		if (rss_types & ETH_RSS_TCP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_TCP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_TCP;
 			else
@@ -14418,9 +14418,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4:
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV4;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV4;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV4;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14429,9 +14429,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV6:
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV6;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV6;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV6;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14440,11 +14440,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_UDP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_UDP:
-		if (rss_types & ETH_RSS_UDP) {
+		if (rss_types & RTE_ETH_RSS_UDP) {
 			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
 			else
 				*hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14453,11 +14453,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_TCP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_TCP:
-		if (rss_types & ETH_RSS_TCP) {
+		if (rss_types & RTE_ETH_RSS_TCP) {
 			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
 			else
 				*hash_field |= MLX5_TCP_IBV_RX_HASH;
@@ -14605,8 +14605,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 	origin = &shared_rss->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+	/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+	origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
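For reference, the zero-means-default convention in the comment above comes from
the rte_flow RSS action, so callers can normalize a requested type mask the same
way. A minimal sketch, not part of this patch; the helper name is illustrative:

#include <rte_ethdev.h>

/* Mirror the mlx5 fallback above: a zero mask selects RTE_ETH_RSS_IP. */
static uint64_t
app_effective_rss_types(uint64_t requested)
{
	return requested ? requested : RTE_ETH_RSS_IP;
}
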
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 1627c3905fa4..8a455cbf22f4 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1816,7 +1816,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_TCP,
+					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
 					 (IBV_RX_HASH_SRC_PORT_TCP |
 					  IBV_RX_HASH_DST_PORT_TCP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1829,7 +1829,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_UDP,
+					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
 					 (IBV_RX_HASH_SRC_PORT_UDP |
 					  IBV_RX_HASH_DST_PORT_UDP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index c32129cdc2b8..a4f690039e24 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -68,7 +68,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 		if (!(*priv->rxqs)[i])
 			continue;
 		(*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
-			!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+			!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
 		++idx;
 	}
 	return 0;
@@ -170,8 +170,8 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	/* Fill each entry of the table even if its bit is not set. */
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 			(*priv->reta_idx)[i];
 	}
 	return 0;
@@ -209,8 +209,8 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (((reta_conf[idx].mask >> i) & 0x1) == 0)
 			continue;
 		MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
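For reference, the reta_conf[] layout indexed above is shared with callers:
entry i lives in group i / RTE_ETH_RETA_GROUP_SIZE at slot
i % RTE_ETH_RETA_GROUP_SIZE, and an update only takes effect where the group's
mask bit is set. A minimal caller-side sketch, assuming reta_size is at most
512; the helper name is illustrative:

#include <string.h>
#include <rte_ethdev.h>

static int
app_reta_round_robin(uint16_t port_id, uint16_t reta_size, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= UINT64_C(1) << pos;	/* mark slot valid */
		reta[idx].reta[pos] = i % nb_rxq;	/* spread over Rx queues */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
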
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d8d7e481dea0..eb4dc3375248 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -333,22 +333,22 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
-	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_TIMESTAMP |
-			     DEV_RX_OFFLOAD_RSS_HASH);
+	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
 	if (!config->mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
-		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (config->hw_csum)
-		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-			     DEV_RX_OFFLOAD_UDP_CKSUM |
-			     DEV_RX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -362,7 +362,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 uint64_t
 mlx5_get_rx_port_offloads(void)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	return offloads;
 }
@@ -694,7 +694,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				    dev->data->dev_conf.rxmode.offloads;
 
 		/* The offloads should be checked on rte_eth_dev layer. */
-		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
 			DRV_LOG(ERR, "port %u queue index %u split "
 				     "offload not configured",
@@ -1325,7 +1325,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
-	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
 	unsigned int max_rx_pktlen = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
@@ -1428,7 +1428,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
-	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
@@ -1472,7 +1472,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			config->mprq.stride_size_n : mprq_stride_size;
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_scatter_en =
-				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
+				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pktlen,
@@ -1487,7 +1487,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pktlen;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1548,9 +1548,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* Configure Rx timestamp. */
-	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
 	tmpl->rxq.timestamp_rx_flag = 0;
 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
 			&tmpl->rxq.timestamp_offset,
@@ -1559,11 +1559,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
@@ -1593,7 +1593,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.crc_present << 2);
 	/* Save port ID. */
 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
-		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = rx_seg[0].mp;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 93b4f517bb3e..65d91bdf67e2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -16,10 +16,10 @@
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
-	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
-	 DEV_TX_OFFLOAD_UDP_CKSUM | \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 
 /*
  * Compile time sanity check for vectorized functions.
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index df671379e46d..12aeba60348a 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -523,36 +523,36 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	unsigned int diff = 0, olx = 0, i, m;
 
 	MLX5_ASSERT(priv);
-	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
 		olx |= MLX5_TXOFF_CONFIG_MULTI;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			   DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
 		/* We should support TCP Send Offload. */
 		olx |= MLX5_TXOFF_CONFIG_TSO;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support Software Parser for Tunnels. */
 		olx |= MLX5_TXOFF_CONFIG_SWP;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support IP/TCP/UDP Checksums. */
 		olx |= MLX5_TXOFF_CONFIG_CSUM;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
 		/* We should support VLAN insertion. */
 		olx |= MLX5_TXOFF_CONFIG_VLAN;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
 	    rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
 	    rte_mbuf_dynfield_lookup
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1f92250f5edd..02bb9307ae61 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -98,42 +98,42 @@ uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_VLAN_INSERT);
+	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	struct mlx5_dev_config *config = &priv->config;
 
 	if (config->hw_csum)
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (config->tx_pp)
-		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 	if (config->swp) {
 		if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->swp & MLX5_SW_PARSING_TSO_CAP)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso) {
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
-				offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 		}
 	}
 	if (!config->mprq.enabled)
-		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -801,17 +801,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int inlen_mode; /* Minimal required Inline data. */
 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
-	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					    DEV_TX_OFFLOAD_IP_TNL_TSO |
-					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	bool vlan_inline;
 	unsigned int temp;
 
 	txq_ctrl->txq.fast_free =
-		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
-		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
 		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
@@ -870,7 +870,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	 * tx_burst routine.
 	 */
 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
-	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
 		      !config->hw_vlan_insert;
 	/*
 	 * If there are few Tx queues it is prioritized
@@ -978,19 +978,19 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 						    MLX5_MAX_TSO_HEADER);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
-	   ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
-	   ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
 	   (config->swp  & MLX5_SW_PARSING_TSO_CAP))
 		txq_ctrl->txq.tunnel_en = 1;
-	txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
-				  DEV_TX_OFFLOAD_UDP_TNL_TSO) &
+	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
 				  txq_ctrl->txq.offloads) && (config->swp &
 				  MLX5_SW_PARSING_TSO_CAP)) |
-				((DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM &
+				((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
 				 txq_ctrl->txq.offloads) && (config->swp &
 				 MLX5_SW_PARSING_CSUM_CAP));
 }
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 60f97f2d2d1f..07792fc5d94f 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -142,9 +142,9 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-				       DEV_RX_OFFLOAD_VLAN_STRIP);
+				       RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (!priv->config.hw_vlan_strip) {
 			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
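For reference, the mask tested above only selects which VLAN settings to
re-evaluate; the desired on/off state is read from rxmode.offloads. From the
application side the same split is reached through the VLAN offload API. A
minimal sketch, helper name illustrative:

#include <rte_ethdev.h>

static int
app_enable_vlan_strip(uint16_t port_id)
{
	int cur = rte_eth_dev_get_vlan_offload(port_id);

	if (cur < 0)
		return cur;
	/* Request stripping; ethdev derives the changed-bits mask
	 * (RTE_ETH_VLAN_STRIP_MASK) handed to the PMD callback above. */
	return rte_eth_dev_set_vlan_offload(port_id,
					    cur | RTE_ETH_VLAN_STRIP_OFFLOAD);
}
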
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 8937ec0d3037..7f7b545ca63a 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -485,8 +485,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	if (config->hw_padding) {
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
index 2a0288087357..10fe6d828ccd 100644
--- a/drivers/net/mvneta/mvneta_ethdev.c
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -114,7 +114,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 	struct mvneta_priv *priv = dev->data->dev_private;
 	struct neta_ppio_params *ppio_params;
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
 		MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		if (dev->data->nb_rx_queues > 1)
@@ -126,7 +126,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ppio_params = &priv->ppio_params;
@@ -151,10 +151,10 @@ static int
 mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 		   struct rte_eth_dev_info *info)
 {
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G;
 
 	info->max_rx_queues = MRVL_NETA_RXQ_MAX;
 	info->max_tx_queues = MRVL_NETA_TXQ_MAX;
@@ -503,28 +503,28 @@ mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 
 	neta_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
index 6428f9ff7931..64aadcffd85a 100644
--- a/drivers/net/mvneta/mvneta_ethdev.h
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -54,14 +54,14 @@
 #define MRVL_NETA_MRU_TO_MTU(mru)	((mru) - MRVL_NETA_HDRS_LEN)
 
 /** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Tx offloads capabilities */
-#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				    DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MVNETA_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS)
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 				PKT_TX_TCP_CKSUM | \
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
index 9836bb071a82..62d8aa586dae 100644
--- a/drivers/net/mvneta/mvneta_rxtx.c
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -734,7 +734,7 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	rxq->priv = priv;
 	rxq->mp = mp;
 	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
-			     DEV_RX_OFFLOAD_IPV4_CKSUM;
+			     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	rxq->size = desc;
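For reference, capability masks such as MVNETA_RX_OFFLOADS/MVNETA_TX_OFFLOADS
above are what the PMD reports in dev_info, and applications are expected to
validate requested offloads against them before configuring the port. A minimal
sketch using only the public ethdev API; the helper name is illustrative:

#include <errno.h>
#include <rte_ethdev.h>

static int
app_check_tx_offloads(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info info;
	int ret = rte_eth_dev_info_get(port_id, &info);

	if (ret != 0)
		return ret;
	/* Any requested bit outside tx_offload_capa is unsupported. */
	return (requested & ~info.tx_offload_capa) ? -ENOTSUP : 0;
}
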
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index a6458d2ce9b5..d0746b0d1215 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -58,15 +58,15 @@
 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
 
 /** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
-			  DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			  RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Port Tx offloads capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				  DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				  DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
-			  DEV_TX_OFFLOAD_MULTI_SEGS)
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 			      PKT_TX_TCP_CKSUM | \
@@ -442,14 +442,14 @@ mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
 
 	if (rss_conf->rss_hf == 0) {
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
-	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_2_TUPLE;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 1;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 0;
@@ -483,8 +483,8 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
-	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		return -EINVAL;
@@ -502,7 +502,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
@@ -524,7 +524,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 
 	if (dev->data->nb_rx_queues == 1 &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
 		priv->configured = 1;
@@ -623,7 +623,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		return 0;
 	}
 
@@ -644,7 +644,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -664,14 +664,14 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 	ret = pp2_ppio_disable(priv->ppio);
 	if (ret)
 		return ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -893,7 +893,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->all_multicast == 1)
 		mrvl_allmulticast_enable(dev);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = mrvl_populate_vlan_table(dev, 1);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to populate VLAN table");
@@ -929,11 +929,11 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 		priv->flow_ctrl = 0;
 	}
 
-	if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 		ret = mrvl_dev_set_link_up(dev);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to set link up");
-			dev->data->dev_link.link_status = ETH_LINK_DOWN;
+			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 			goto out;
 		}
 	}
@@ -1202,30 +1202,30 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case SPEED_10000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 	pp2_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -1709,11 +1709,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
 	struct mrvl_priv *priv = dev->data->dev_private;
 
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G |
-			   ETH_LINK_SPEED_10G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G |
+			   RTE_ETH_LINK_SPEED_10G;
 
 	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
 	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
@@ -1733,9 +1733,9 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 	info->tx_offload_capa = MRVL_TX_OFFLOADS;
 	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
 
-	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-				       ETH_RSS_NONFRAG_IPV4_TCP |
-				       ETH_RSS_NONFRAG_IPV4_UDP;
+	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	/* By default packets are dropped if no descriptors are available */
 	info->default_rxconf.rx_drop_en = 1;
@@ -1864,13 +1864,13 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
 		return -ENOTSUP;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = mrvl_populate_vlan_table(dev, 1);
 		else
 			ret = mrvl_populate_vlan_table(dev, 0);
@@ -1879,7 +1879,7 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			return ret;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
 		MRVL_LOG(ERR, "Extend VLAN not supported\n");
 		return -ENOTSUP;
 	}
@@ -2022,7 +2022,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -2182,7 +2182,7 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return ret;
 	}
 
-	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
 
 	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
 	if (ret) {
@@ -2191,10 +2191,10 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	if (en) {
-		if (fc_conf->mode == RTE_FC_NONE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+		if (fc_conf->mode == RTE_ETH_FC_NONE)
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 	}
 
 	return 0;
@@ -2240,19 +2240,19 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		rx_en = 1;
 		tx_en = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		rx_en = 0;
 		tx_en = 1;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		rx_en = 1;
 		tx_en = 0;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		rx_en = 0;
 		tx_en = 0;
 		break;
@@ -2329,11 +2329,11 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hash_type == PP2_PPIO_HASH_T_NONE)
 		rss_conf->rss_hf = 0;
 	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
-		rss_conf->rss_hf = ETH_RSS_IPV4;
+		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	return 0;
 }
@@ -3152,7 +3152,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 	eth_dev->dev_ops = &mrvl_ops;
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_eth_dev_probing_finish(eth_dev);
 	return 0;
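For reference, mrvl_flow_ctrl_get() above folds the two pause directions into a
single rte_eth_fc_mode value. The same mapping written out as a pure helper; a
sketch, not part of this patch:

#include <stdbool.h>
#include <rte_ethdev.h>

static enum rte_eth_fc_mode
fc_mode_from_pause(bool rx_en, bool tx_en)
{
	if (rx_en && tx_en)
		return RTE_ETH_FC_FULL;
	if (tx_en)
		return RTE_ETH_FC_TX_PAUSE;
	if (rx_en)
		return RTE_ETH_FC_RX_PAUSE;
	return RTE_ETH_FC_NONE;
}
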
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 9e2a40597349..9c4ae80e7e16 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -40,16 +40,16 @@
 #include "hn_nvs.h"
 #include "ndis.h"
 
-#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
-			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-			    DEV_TX_OFFLOAD_TCP_TSO    | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS | \
-			    DEV_TX_OFFLOAD_VLAN_INSERT)
+#define HN_TX_OFFLOAD_CAPS (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			    RTE_ETH_TX_OFFLOAD_TCP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_TCP_TSO    | \
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+			    RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 
-#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
-			    DEV_RX_OFFLOAD_VLAN_STRIP | \
-			    DEV_RX_OFFLOAD_RSS_HASH)
+#define HN_RX_OFFLOAD_CAPS (RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+			    RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NETVSC_ARG_LATENCY "latency"
 #define NETVSC_ARG_RXBREAK "rx_copybreak"
@@ -238,21 +238,21 @@ hn_dev_link_update(struct rte_eth_dev *dev,
 	hn_rndis_get_linkspeed(hv);
 
 	link = (struct rte_eth_link) {
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_autoneg = ETH_LINK_SPEED_FIXED,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 		.link_speed = hv->link_speed / 10000,
 	};
 
 	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (old.link_status == link.link_status)
 		return 0;
 
 	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
-		     (link.link_status == ETH_LINK_UP) ? "up" : "down");
+		     (link.link_status == RTE_ETH_LINK_UP) ? "up" : "down");
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -263,14 +263,14 @@ static int hn_dev_info_get(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	int rc;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = HN_MAX_XFER_LEN;
 	dev_info->max_mac_addrs  = 1;
 
 	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
 	dev_info->flow_type_rss_offloads = hv->rss_offloads;
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 
 	dev_info->max_rx_queues = hv->max_queues;
 	dev_info->max_tx_queues = hv->max_queues;
@@ -306,8 +306,8 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -346,8 +346,8 @@ static int hn_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -362,17 +362,17 @@ static void hn_rss_hash_init(struct hn_data *hv,
 	/* Convert from DPDK RSS hash flags to NDIS hash flags */
 	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
 
-	if (rss_conf->rss_hf & ETH_RSS_IPV4)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 		hv->rss_hash |= NDIS_HASH_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 		hv->rss_hash |=  NDIS_HASH_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX)
 		hv->rss_hash |=  NDIS_HASH_IPV6_EX;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
 
 	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
@@ -427,22 +427,22 @@ static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	if (hv->rss_hash & NDIS_HASH_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_IPV4;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_IPV6;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_EX;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	return 0;
 }
@@ -686,8 +686,8 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
 	if (unsupported) {
@@ -705,7 +705,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	hv->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	err = hn_rndis_conf_offload(hv, txmode->offloads,
 				    rxmode->offloads);
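For reference, hn_rss_hash_init() above converts RTE_ETH_RSS_* bits to NDIS
hash flags one branch at a time. The same mapping as a table, so the pairs are
easy to audit; a sketch only, assuming the NDIS_HASH_* constants from the
driver-local ndis.h:

#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>

#include "ndis.h"	/* NDIS_HASH_* (driver-local header) */

static const struct {
	uint64_t rss_hf;	/* RTE_ETH_RSS_* bit */
	uint32_t ndis;		/* matching NDIS_HASH_* bit */
} hn_rss_map[] = {
	{ RTE_ETH_RSS_IPV4,		NDIS_HASH_IPV4 },
	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,	NDIS_HASH_TCP_IPV4 },
	{ RTE_ETH_RSS_IPV6,		NDIS_HASH_IPV6 },
	{ RTE_ETH_RSS_IPV6_EX,		NDIS_HASH_IPV6_EX },
	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP,	NDIS_HASH_TCP_IPV6 },
	{ RTE_ETH_RSS_IPV6_TCP_EX,	NDIS_HASH_TCP_IPV6_EX },
};

static uint32_t
hn_rss_hf_to_ndis(uint64_t rss_hf)
{
	uint32_t hash = NDIS_HASH_FUNCTION_TOEPLITZ;
	unsigned int i;

	for (i = 0; i < RTE_DIM(hn_rss_map); i++)
		if (rss_hf & hn_rss_map[i].rss_hf)
			hash |= hn_rss_map[i].ndis;
	return hash;
}
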
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index 62ba39636cd8..1b63b27e0c3e 100644
--- a/drivers/net/netvsc/hn_rndis.c
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -710,15 +710,15 @@ hn_rndis_query_rsscaps(struct hn_data *hv,
 
 	hv->rss_offloads = 0;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
-		hv->rss_offloads |= ETH_RSS_IPV4
-			| ETH_RSS_NONFRAG_IPV4_TCP
-			| ETH_RSS_NONFRAG_IPV4_UDP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV4
+			| RTE_ETH_RSS_NONFRAG_IPV4_TCP
+			| RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
-		hv->rss_offloads |= ETH_RSS_IPV6
-			| ETH_RSS_NONFRAG_IPV6_TCP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6
+			| RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
-		hv->rss_offloads |= ETH_RSS_IPV6_EX
-			| ETH_RSS_IPV6_TCP_EX;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6_EX
+			| RTE_ETH_RSS_IPV6_TCP_EX;
 
 	/* Commit! */
 	*rxr_cnt0 = rxr_cnt;
@@ -800,7 +800,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -812,7 +812,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
 		    == NDIS_RXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
@@ -826,7 +826,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
 			params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -839,7 +839,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
 			params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
@@ -851,21 +851,21 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
 		    == NDIS_TXCSUM_CAP_IP4)
 			params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
 			goto unsupported;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
 			params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
 			params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
 		else
@@ -907,41 +907,41 @@ int hn_rndis_get_offload(struct hn_data *hv,
 		return error;
 	}
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
 	    == HN_NDIS_TXCSUM_CAP_IP4)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
 	    == HN_NDIS_TXCSUM_CAP_TCP4 &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
 	    == HN_NDIS_TXCSUM_CAP_TCP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 
 	if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
 	    (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
 	    == HN_NDIS_LSOV2_CAP_IP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				    DEV_RX_OFFLOAD_RSS_HASH;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 
 	return 0;
 }
diff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c
index 99d93ebf4667..3c39937816a4 100644
--- a/drivers/net/nfb/nfb_ethdev.c
+++ b/drivers/net/nfb/nfb_ethdev.c
@@ -200,7 +200,7 @@ nfb_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -268,26 +268,26 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 
 	status.speed = MAC_SPEED_UNKNOWN;
 
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_status = ETH_LINK_DOWN;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_SPEED_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	if (internals->rxmac[0] != NULL) {
 		nc_rxmac_read_status(internals->rxmac[0], &status);
 
 		switch (status.speed) {
 		case MAC_SPEED_10G:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case MAC_SPEED_40G:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case MAC_SPEED_100G:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -296,7 +296,7 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 		nc_rxmac_read_status(internals->rxmac[i], &status);
 
 		if (status.enabled && status.link_up) {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			break;
 		}
 	}
diff --git a/drivers/net/nfb/nfb_rx.c b/drivers/net/nfb/nfb_rx.c
index 3ebb332ae46c..f76e2ba64621 100644
--- a/drivers/net/nfb/nfb_rx.c
+++ b/drivers/net/nfb/nfb_rx.c
@@ -42,7 +42,7 @@ nfb_check_timestamp(struct rte_devargs *devargs)
 	}
 	/* Timestamps are enabled when there is
 	 * key-value pair: enable_timestamp=1
-	 * TODO: timestamp should be enabled with DEV_RX_OFFLOAD_TIMESTAMP
+	 * TODO: timestamp should be enabled with RTE_ETH_RX_OFFLOAD_TIMESTAMP
 	 */
 	if (rte_kvargs_process(kvlist, TIMESTAMP_ARG,
 		timestamp_check_handler, NULL) < 0) {
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 0003fd54dde5..3ea697c54462 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -160,8 +160,8 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
@@ -170,7 +170,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX mode */
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
 	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
 		PMD_INIT_LOG(INFO, "RSS not supported");
 		return -EINVAL;
@@ -359,19 +359,19 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 	}
 
 	hw->mtu = dev->data->mtu;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
 	/* L2 broadcast */
@@ -383,13 +383,13 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* TX checksum offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -397,7 +397,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	}
 
 	/* RX gather */
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
 	return ctrl;
@@ -485,14 +485,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	int ret;
 
 	static const uint32_t ls_to_ethtool[] = {
-		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
-		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
-		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
-		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
-		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
-		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
+		[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
+		[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
+		[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
+		[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
+		[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
 	};
 
 	PMD_DRV_LOG(DEBUG, "Link update");
@@ -504,15 +504,15 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 			 NFP_NET_CFG_STS_LINK_RATE_MASK;
 
 	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		link.link_speed = ls_to_ethtool[nn_link_status];
 
@@ -701,26 +701,26 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_UDP_CKSUM |
-					     DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-					     DEV_TX_OFFLOAD_UDP_CKSUM |
-					     DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -757,22 +757,22 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	};
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-		dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-						   ETH_RSS_NONFRAG_IPV4_TCP |
-						   ETH_RSS_NONFRAG_IPV4_UDP |
-						   ETH_RSS_IPV6 |
-						   ETH_RSS_NONFRAG_IPV6_TCP |
-						   ETH_RSS_NONFRAG_IPV6_UDP;
+		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+						   RTE_ETH_RSS_IPV6 |
+						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 	}
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -843,7 +843,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 	if (link.link_status)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 			    dev->data->port_id, link.link_speed,
-			    link.link_duplex == ETH_LINK_FULL_DUPLEX
+			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 			    ? "full-duplex" : "half-duplex");
 	else
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -973,12 +973,12 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	new_ctrl = 0;
 
 	/* Enable vlan strip if it is not configured yet */
-	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
 
 	/* Disable vlan strip just if it is configured */
-	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
@@ -1018,8 +1018,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1099,8 +1099,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1138,22 +1138,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1223,22 +1223,22 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	/* Propagate current RSS hash functions to caller */
 	rss_conf->rss_hf = rss_hf;
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 1169ea77a8c7..e08e594b04fe 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -141,7 +141,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index 62cb3536e0c9..817fe64dbceb 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -103,7 +103,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3b5c6615adfa..fc76b84b5b66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -409,7 +409,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	dev->data->dev_link.link_status = link_up;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 		negotiate = true;
 
 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
@@ -418,11 +418,11 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 
 	allowed_speeds = 0;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_1G;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_100M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_10M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
 
 	if (*link_speeds & ~allowed_speeds) {
 		PMD_INIT_LOG(ERR, "Invalid link setting");
@@ -430,14 +430,14 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = hw->mac.default_speeds;
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= NGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= NGBE_LINK_SPEED_100M_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= NGBE_LINK_SPEED_10M_FULL;
 	}
 
@@ -653,8 +653,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_10M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -682,11 +682,11 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			~ETH_LINK_SPEED_AUTONEG);
+			~RTE_ETH_LINK_SPEED_AUTONEG);
 
 	hw->mac.get_link_status = true;
 
@@ -699,8 +699,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -708,27 +708,27 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 
 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case NGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 
 	case NGBE_LINK_SPEED_10M_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		lan_speed = 0;
 		break;
 
 	case NGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		lan_speed = 1;
 		break;
 
 	case NGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		lan_speed = 2;
 		break;
 	}
@@ -912,11 +912,11 @@ ngbe_dev_link_status_print(struct rte_eth_dev *dev)
 
 	rte_eth_linkstatus_get(dev, &link);
 
-	if (link.link_status == ETH_LINK_UP) {
+	if (link.link_status == RTE_ETH_LINK_UP) {
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -956,7 +956,7 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
 		ngbe_dev_link_update(dev, 0);
 
 		/* likely to up */
-		if (link.link_status != ETH_LINK_UP)
+		if (link.link_status != RTE_ETH_LINK_UP)
 			/* handle it 1 sec later, wait it being stable */
 			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
 		/* likely to down */
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 25b9e5b1ce1b..ca03469d0e6d 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -61,16 +61,16 @@ struct pmd_internals {
 	rte_spinlock_t rss_lock;
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[40];                /**< 40-byte hash key. */
 };
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
@@ -189,7 +189,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return -EINVAL;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -199,7 +199,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return 0;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -391,9 +391,9 @@ eth_rss_reta_update(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
 		internal->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -416,8 +416,8 @@ eth_rss_reta_query(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
 	}
@@ -548,8 +548,8 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	internals->port_id = eth_dev->data->port_id;
 	rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
-	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
-	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+	internals->flow_type_rss_offloads =  RTE_ETH_RSS_PROTO_MASK;
+	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;
 
 	rte_memcpy(internals->rss_key, default_rss_key, 40);
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index f578123ed00b..5b8cbec67b5d 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -158,7 +158,7 @@ octeontx_link_status_print(struct rte_eth_dev *eth_dev,
 		octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
 			  (eth_dev->data->port_id),
 			  link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		octeontx_log_info("Port %d: Link Down",
@@ -171,38 +171,38 @@ octeontx_link_status_update(struct octeontx_nic *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	switch (nic->speed) {
 	case OCTEONTX_LINK_SPEED_SGMII:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_XAUI:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RXAUI:
 	case OCTEONTX_LINK_SPEED_10G_R:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case OCTEONTX_LINK_SPEED_QSGMII:
-		link->link_speed = ETH_SPEED_NUM_5G;
+		link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case OCTEONTX_LINK_SPEED_40G_R:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RESERVE1:
 	case OCTEONTX_LINK_SPEED_RESERVE2:
 	default:
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		octeontx_log_err("incorrect link speed %d", nic->speed);
 		break;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -355,20 +355,20 @@ octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= OCCTX_TX_MULTI_SEG_F;
 
 	return flags;
@@ -380,21 +380,21 @@ octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		flags |= OCCTX_RX_MULTI_SEG_F;
 		eth_dev->data->scattered_rx = 1;
 		/* If scatter mode is enabled, TX should also be in multi
 		 * seg mode, else memory leak will occur
 		 */
-		nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	return flags;
@@ -423,18 +423,18 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+		txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		octeontx_log_err("setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -530,13 +530,13 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		octeontx_log_err("Scatter mode is disabled");
 		return -EINVAL;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -571,7 +571,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
 
 	/* Setup scatter mode if needed by jumbo */
 	if (data->mtu > buffsz) {
-		nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
 		nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
 	}
@@ -843,10 +843,10 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_40G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_40G;
 
 	/* Min/Max MTU supported */
 	dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
@@ -1356,7 +1356,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	nic->ev_ports = 1;
 	nic->print_flag = -1;
 
-	data->dev_link.link_status = ETH_LINK_DOWN;
+	data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	data->dev_started = 0;
 	data->promiscuous = 0;
 	data->all_multicast = 0;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 3a02824e3948..c493fa7a03ed 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -55,23 +55,22 @@
 #define OCCTX_MAX_MTU		(OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
 
 #define OCTEONTX_RX_OFFLOADS		(				   \
-					 DEV_RX_OFFLOAD_CHECKSUM	 | \
-					 DEV_RX_OFFLOAD_SCTP_CKSUM       | \
-					 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_RX_OFFLOAD_SCATTER	         | \
-					 DEV_RX_OFFLOAD_SCATTER		 | \
-					 DEV_RX_OFFLOAD_VLAN_FILTER)
+					 RTE_ETH_RX_OFFLOAD_CHECKSUM	 | \
+					 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER	         | \
+					 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 
 #define OCTEONTX_TX_OFFLOADS		(				   \
-					 DEV_TX_OFFLOAD_MBUF_FAST_FREE	 | \
-					 DEV_TX_OFFLOAD_MT_LOCKFREE	 | \
-					 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_IPV4_CKSUM	 | \
-					 DEV_TX_OFFLOAD_TCP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_SCTP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_MULTI_SEGS)
+					 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	 | \
+					 RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	 | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_TCP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 static inline struct octeontx_nic *
 octeontx_pmd_priv(struct rte_eth_dev *dev)
diff --git a/drivers/net/octeontx/octeontx_ethdev_ops.c b/drivers/net/octeontx/octeontx_ethdev_ops.c
index dbe13ce3826b..6ec2b71b0672 100644
--- a/drivers/net/octeontx/octeontx_ethdev_ops.c
+++ b/drivers/net/octeontx/octeontx_ethdev_ops.c
@@ -43,20 +43,20 @@ octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			rc = octeontx_vlan_hw_filter(nic, true);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
 		} else {
 			rc = octeontx_vlan_hw_filter(nic, false);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
 		}
 	}
@@ -139,7 +139,7 @@ octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
 
 	TAILQ_INIT(&nic->vlan_info.fltr_tbl);
 
-	rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	rc = octeontx_dev_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 	if (rc)
 		octeontx_log_err("Failed to set vlan offload rc=%d", rc);
 
@@ -219,13 +219,13 @@ octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
 		return rc;
 
 	if (conf.rx_pause && conf.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (conf.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (conf.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	/* low_water & high_water values are in Bytes */
 	fc_conf->low_water = conf.low_water;
@@ -272,10 +272,10 @@ octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	conf.high_water = fc_conf->high_water;
 	conf.low_water = fc_conf->low_water;
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 9c5d748e8575..72da8856bd86 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -21,7 +21,7 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 
 	if (otx2_dev_is_vf(dev) ||
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -33,10 +33,10 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 
 	/* TSO not supported for earlier chip revisions */
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
-		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	return capa;
 }
 
@@ -66,8 +66,8 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	req->npa_func = otx2_npa_pf_func_get();
 	req->sso_func = otx2_sso_pf_func_get();
 	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
@@ -373,7 +373,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 
 	aq->rq.sso_ena = 0;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		aq->rq.ipsech_ena = 1;
 
 	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
@@ -665,7 +665,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev)) {
 		rc = otx2_nix_raw_clock_tsc_conv(dev);
 		if (rc) {
@@ -692,7 +692,7 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -707,29 +707,29 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-			(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+			(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_QINQ_STRIP))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
 		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	if (!dev->ptype_disable)
@@ -768,43 +768,43 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -914,8 +914,8 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 		/* Setting up the rx[tx]_offload_flags due to change
 		 * in rx[tx]_offloads.
@@ -1848,21 +1848,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
 
 	if (otx2_dev_is_Ax(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		otx2_err("Outer IP and SCTP checksum unsupported");
 		goto fail_configure;
 	}
@@ -2235,7 +2235,7 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled in PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev))
 		otx2_nix_timesync_enable(eth_dev);
 	else
@@ -2563,8 +2563,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	rc = otx2_eth_sec_ctx_create(eth_dev);
 	if (rc)
 		goto free_mac_addrs;
-	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 
 	/* Initialize rte-flow */
 	rc = otx2_flow_init(dev);
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 4557a0ee1945..a5282c6c1231 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -117,43 +117,43 @@
 #define CQ_TIMER_THRESH_DEFAULT	0xAULL /* ~1usec i.e (0xA * 100nsec) */
 #define CQ_TIMER_THRESH_MAX     255
 
-#define NIX_RSS_L3_L4_SRC_DST  (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
-				| ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define NIX_RSS_L3_L4_SRC_DST  (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY \
+				| RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
-#define NIX_RSS_OFFLOAD		(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
-				 ETH_RSS_TCP | ETH_RSS_SCTP | \
-				 ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
-				 NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | \
-				 ETH_RSS_C_VLAN)
+#define NIX_RSS_OFFLOAD		(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |\
+				 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | \
+				 RTE_ETH_RSS_TUNNEL | RTE_ETH_RSS_L2_PAYLOAD | \
+				 NIX_RSS_L3_L4_SRC_DST | RTE_ETH_RSS_LEVEL_MASK | \
+				 RTE_ETH_RSS_C_VLAN)
 
 #define NIX_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE	| \
-	DEV_TX_OFFLOAD_MT_LOCKFREE	| \
-	DEV_TX_OFFLOAD_VLAN_INSERT	| \
-	DEV_TX_OFFLOAD_QINQ_INSERT	| \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
-	DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_CKSUM	| \
-	DEV_TX_OFFLOAD_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_TSO		| \
-	DEV_TX_OFFLOAD_VXLAN_TNL_TSO    | \
-	DEV_TX_OFFLOAD_GENEVE_TNL_TSO   | \
-	DEV_TX_OFFLOAD_GRE_TNL_TSO	| \
-	DEV_TX_OFFLOAD_MULTI_SEGS	| \
-	DEV_TX_OFFLOAD_IPV4_CKSUM)
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	| \
+	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	| \
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_QINQ_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO		| \
+	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    | \
+	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   | \
+	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	| \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS	| \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define NIX_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM		| \
-	DEV_RX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_RX_OFFLOAD_SCATTER		| \
-	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_RX_OFFLOAD_VLAN_STRIP	| \
-	DEV_RX_OFFLOAD_VLAN_FILTER	| \
-	DEV_RX_OFFLOAD_QINQ_STRIP	| \
-	DEV_RX_OFFLOAD_TIMESTAMP	| \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM		| \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_RX_OFFLOAD_SCATTER		| \
+	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER	| \
+	RTE_ETH_RX_OFFLOAD_QINQ_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP	| \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NIX_DEFAULT_RSS_CTX_GROUP  0
 #define NIX_DEFAULT_RSS_MCAM_IDX  -1
diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c b/drivers/net/octeontx2/otx2_ethdev_devargs.c
index 83f905315b38..60bf6c3f5f05 100644
--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -49,12 +49,12 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
-		val = ETH_RSS_RETA_SIZE_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
-		val = ETH_RSS_RETA_SIZE_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
-		val = ETH_RSS_RETA_SIZE_256;
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
+		val = RTE_ETH_RSS_RETA_SIZE_64;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
+		val = RTE_ETH_RSS_RETA_SIZE_128;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
+		val = RTE_ETH_RSS_RETA_SIZE_256;
 	else
 		val = NIX_RSS_RETA_SIZE;
 
diff --git a/drivers/net/octeontx2/otx2_ethdev_ops.c b/drivers/net/octeontx2/otx2_ethdev_ops.c
index 22a8af5cba45..d5caaa326a5a 100644
--- a/drivers/net/octeontx2/otx2_ethdev_ops.c
+++ b/drivers/net/octeontx2/otx2_ethdev_ops.c
@@ -26,11 +26,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER))
 		return -EINVAL;
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -568,17 +568,17 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	};
 
 	/* Auto negotiation disabled */
-	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
-		devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+		devinfo->speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G;
 
 		/* 50G and 100G to be supported for board version C0
 		 * and above.
 		 */
 		if (!otx2_dev_is_Ax(dev))
-			devinfo->speed_capa |= ETH_LINK_SPEED_50G |
-					       ETH_LINK_SPEED_100G;
+			devinfo->speed_capa |= RTE_ETH_LINK_SPEED_50G |
+					       RTE_ETH_LINK_SPEED_100G;
 	}
 
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
diff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c
index c2a36883cbf2..e1654ef5b284 100644
--- a/drivers/net/octeontx2/otx2_ethdev_sec.c
+++ b/drivers/net/octeontx2/otx2_ethdev_sec.c
@@ -890,8 +890,8 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
 			 !RTE_IS_POWER_OF_2(sa_width));
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return 0;
 
 	if (rte_security_dynfield_register() < 0)
@@ -933,8 +933,8 @@ otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
 	uint16_t port = eth_dev->data->port_id;
 	char name[RTE_MEMZONE_NAMESIZE];
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	lookup_mem_sa_tbl_clear(eth_dev);
diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c
index 6df0732189eb..1d0fe4e950d4 100644
--- a/drivers/net/octeontx2/otx2_flow.c
+++ b/drivers/net/octeontx2/otx2_flow.c
@@ -625,7 +625,7 @@ otx2_flow_create(struct rte_eth_dev *dev,
 		goto err_exit;
 	}
 
-	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rc = flow_update_sec_tt(dev, actions);
 		if (rc != 0) {
 			rte_flow_error_set(error, EIO,
diff --git a/drivers/net/octeontx2/otx2_flow_ctrl.c b/drivers/net/octeontx2/otx2_flow_ctrl.c
index 76bf48100183..071740de86a7 100644
--- a/drivers/net/octeontx2/otx2_flow_ctrl.c
+++ b/drivers/net/octeontx2/otx2_flow_ctrl.c
@@ -54,7 +54,7 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 	int rc;
 
 	if (otx2_dev_is_lbk(dev)) {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		return 0;
 	}
 
@@ -66,13 +66,13 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		goto done;
 
 	if (rsp->rx_pause && rsp->tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rsp->rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (rsp->tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 done:
 	return rc;
@@ -159,10 +159,10 @@ otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -212,11 +212,11 @@ otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (otx2_dev_is_Ax(dev) &&
 	    (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
-	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_conf.mode == RTE_ETH_FC_FULL || fc_conf.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_conf.mode =
-				(fc_conf.mode == RTE_FC_FULL ||
-				fc_conf.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_conf.mode == RTE_ETH_FC_FULL ||
+				fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
@@ -234,7 +234,7 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		return 0;
 
 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -242,10 +242,10 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
diff --git a/drivers/net/octeontx2/otx2_flow_parse.c b/drivers/net/octeontx2/otx2_flow_parse.c
index 79b92fda8a4a..91267bbb8182 100644
--- a/drivers/net/octeontx2/otx2_flow_parse.c
+++ b/drivers/net/octeontx2/otx2_flow_parse.c
@@ -852,7 +852,7 @@ parse_rss_action(struct rte_eth_dev *dev,
 					  attr, "No support of RSS in egress");
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  act, "multi-queue mode is disabled");
@@ -1186,7 +1186,7 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev,
 		 *FLOW_KEY_ALG index. So, till we update the action with
 		 *flow_key_alg index, set the action to drop.
 		 */
-		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 			flow->npc_action = NIX_RX_ACTIONOP_DROP;
 		else
 			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
diff --git a/drivers/net/octeontx2/otx2_link.c b/drivers/net/octeontx2/otx2_link.c
index 81dd6243b977..8f5d0eed92b6 100644
--- a/drivers/net/octeontx2/otx2_link.c
+++ b/drivers/net/octeontx2/otx2_link.c
@@ -41,7 +41,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		otx2_info("Port %d: Link Up - speed %u Mbps - %s",
 			  (int)(eth_dev->data->port_id),
 			  (uint32_t)link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
@@ -92,7 +92,7 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 
 	eth_link.link_status = link->link_up;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	otx2_dev->speed = link->speed;
@@ -111,10 +111,10 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 static int
 lbk_link_update(struct rte_eth_link *link)
 {
-	link->link_status = ETH_LINK_UP;
-	link->link_speed = ETH_SPEED_NUM_100G;
-	link->link_autoneg = ETH_LINK_FIXED;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = RTE_ETH_LINK_UP;
+	link->link_speed = RTE_ETH_SPEED_NUM_100G;
+	link->link_autoneg = RTE_ETH_LINK_FIXED;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	return 0;
 }
 
@@ -131,7 +131,7 @@ cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
 
 	link->link_status = rsp->link_info.link_up;
 	link->link_speed = rsp->link_info.speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (rsp->link_info.full_duplex)
 		link->link_duplex = rsp->link_info.full_duplex;
@@ -233,22 +233,22 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 
 	/* 50G and 100G to be supported for board version C0 and above */
 	if (!otx2_dev_is_Ax(dev)) {
-		if (link_speeds & ETH_LINK_SPEED_100G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 			link_speed = 100000;
-		if (link_speeds & ETH_LINK_SPEED_50G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 			link_speed = 50000;
 	}
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed = 40000;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed = 25000;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed = 20000;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed = 10000;
-	if (link_speeds & ETH_LINK_SPEED_5G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 		link_speed = 5000;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed = 1000;
 
 	return link_speed;
@@ -257,11 +257,11 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 static inline uint8_t
 nix_parse_eth_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-			(link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+			(link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 int
@@ -279,7 +279,7 @@ otx2_apply_link_speed(struct rte_eth_dev *eth_dev)
 	cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
 	if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
 		cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
-		cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+		cfg.an = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		return cgx_change_mode(dev, &cfg);
 	}
diff --git a/drivers/net/octeontx2/otx2_mcast.c b/drivers/net/octeontx2/otx2_mcast.c
index f84aa1bf570c..b9c63ad3bc21 100644
--- a/drivers/net/octeontx2/otx2_mcast.c
+++ b/drivers/net/octeontx2/otx2_mcast.c
@@ -100,7 +100,7 @@ nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
 
 		action = NIX_RX_ACTIONOP_UCAST;
 
-		if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+		if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 			action = NIX_RX_ACTIONOP_RSS;
 			action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 		}
diff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c
index 91e5c0f6bd11..abb213058792 100644
--- a/drivers/net/octeontx2/otx2_ptp.c
+++ b/drivers/net/octeontx2/otx2_ptp.c
@@ -250,7 +250,7 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	/* System time should be already on by default */
 	nix_start_timecounters(eth_dev);
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
@@ -287,7 +287,7 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 	if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
 		return -EINVAL;
 
-	dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
 
diff --git a/drivers/net/octeontx2/otx2_rss.c b/drivers/net/octeontx2/otx2_rss.c
index 7dbe5f69ae65..68cef1caa394 100644
--- a/drivers/net/octeontx2/otx2_rss.c
+++ b/drivers/net/octeontx2/otx2_rss.c
@@ -85,8 +85,8 @@ otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				rss->ind_tbl[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -118,8 +118,8 @@ otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = rss->ind_tbl[j];
 	}
@@ -178,23 +178,23 @@ rss_get_key(struct otx2_eth_dev *dev, uint8_t *key)
 }
 
 #define RSS_IPV4_ENABLE ( \
-			  ETH_RSS_IPV4 | \
-			  ETH_RSS_FRAG_IPV4 | \
-			  ETH_RSS_NONFRAG_IPV4_UDP | \
-			  ETH_RSS_NONFRAG_IPV4_TCP | \
-			  ETH_RSS_NONFRAG_IPV4_SCTP)
+			  RTE_ETH_RSS_IPV4 | \
+			  RTE_ETH_RSS_FRAG_IPV4 | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE ( \
-			  ETH_RSS_IPV6 | \
-			  ETH_RSS_FRAG_IPV6 | \
-			  ETH_RSS_NONFRAG_IPV6_UDP | \
-			  ETH_RSS_NONFRAG_IPV6_TCP | \
-			  ETH_RSS_NONFRAG_IPV6_SCTP)
+			  RTE_ETH_RSS_IPV6 | \
+			  RTE_ETH_RSS_FRAG_IPV6 | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE ( \
-			     ETH_RSS_IPV6_EX | \
-			     ETH_RSS_IPV6_TCP_EX | \
-			     ETH_RSS_IPV6_UDP_EX)
+			     RTE_ETH_RSS_IPV6_EX | \
+			     RTE_ETH_RSS_IPV6_TCP_EX | \
+			     RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS   3
 
@@ -233,24 +233,24 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->rss_info.nix_rss = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -259,34 +259,34 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -343,7 +343,7 @@ otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 		otx2_nix_rss_set_key(dev, rss_conf->rss_key,
 				     (uint32_t)rss_conf->rss_key_len);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
@@ -390,7 +390,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	int rc;
 
 	/* Skip further configuration if selected mode is not RSS */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS || !qcnt)
 		return 0;
 
 	/* Update default RSS key and cfg */
@@ -408,7 +408,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	}
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
diff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c
index ffeade5952dc..986902287b67 100644
--- a/drivers/net/octeontx2/otx2_rx.c
+++ b/drivers/net/octeontx2/otx2_rx.c
@@ -414,12 +414,12 @@ NIX_RX_FASTPATH_MODES
 	/* For PTP enabled, scalar rx function should be chosen as most of the
 	 * PTP apps are implemented to rx burst 1 pkt.
 	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		pick_rx_func(eth_dev, nix_eth_rx_burst);
 	else
 		pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 
 	/* Copy multi seg version with no offload for tear down sequence */
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index ff299f00b913..c60190074926 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -1070,7 +1070,7 @@ NIX_TX_FASTPATH_MODES
 	else
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 
 	rte_mb();
diff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c
index f5161e17a16d..cce643b7b51d 100644
--- a/drivers/net/octeontx2/otx2_vlan.c
+++ b/drivers/net/octeontx2/otx2_vlan.c
@@ -50,7 +50,7 @@ nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
 
 	action = NIX_RX_ACTIONOP_UCAST;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		action = NIX_RX_ACTIONOP_RSS;
 		action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 	}
@@ -99,7 +99,7 @@ nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
 	 * Take offset from LA since in case of untagged packet,
 	 * lbptr is zero.
 	 */
-	if (type == ETH_VLAN_TYPE_OUTER) {
+	if (type == RTE_ETH_VLAN_TYPE_OUTER) {
 		vtag_action.act.vtag0_def = vtag_index;
 		vtag_action.act.vtag0_lid = NPC_LID_LA;
 		vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
@@ -413,7 +413,7 @@ nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
 		if (vlan->strip_on ||
 		    (vlan->qinq_on && !vlan->qinq_before_def)) {
 			if (eth_dev->data->dev_conf.rxmode.mq_mode ==
-								ETH_MQ_RX_RSS)
+								RTE_ETH_MQ_RX_RSS)
 				vlan->def_rx_mcam_ent.action |=
 							NIX_RX_ACTIONOP_RSS;
 			else
@@ -717,48 +717,48 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
 	rxmode = &eth_dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, true);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, false);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, true, 0);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, false, 0);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
 		if (!dev->vlan_info.qinq_on) {
-			offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, true);
 			if (rc)
 				goto done;
 		}
 	} else {
 		if (dev->vlan_info.qinq_on) {
-			offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, false);
 			if (rc)
 				goto done;
 		}
 	}
 
-	if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-			DEV_RX_OFFLOAD_QINQ_STRIP)) {
+	if (offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) {
 		dev->rx_offloads |= offloads;
 		dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 		otx2_eth_set_rx_function(eth_dev);
@@ -780,7 +780,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
 
 	tpid_cfg->tpid = tpid;
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
 	else
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
@@ -789,7 +789,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	if (rc)
 		return rc;
 
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		dev->vlan_info.outer_vlan_tpid = tpid;
 	else
 		dev->vlan_info.inner_vlan_tpid = tpid;
@@ -864,7 +864,7 @@ otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev,       uint16_t vlan_id, int on)
 		vlan->outer_vlan_idx = 0;
 	}
 
-	rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+	rc = nix_vlan_handle_default_tx_entry(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					      vtag_index, on);
 	if (rc < 0) {
 		printf("Default tx entry failed with rc %d\n", rc);
@@ -986,12 +986,12 @@ otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
 	} else {
 		/* Reinstall all mcam entries now if filter offload is set */
 		if (eth_dev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			nix_vlan_reinstall_vlan_filters(eth_dev);
 	}
 
 	mask =
-	    ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	    RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	rc = otx2_nix_vlan_offload_set(eth_dev, mask);
 	if (rc) {
 		otx2_err("Failed to set vlan offload rc=%d", rc);
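[Note for reviewers, not part of the patch: the otx2 hunks above follow the generic
vlan_offload_set contract, where 'mask' carries the settings that changed and the
requested state is read back from rxmode->offloads. A minimal driver-side sketch
with the renamed flags; the hw_vlan_*_enable() hooks are hypothetical:]

static int
example_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int rc = 0;

	/* 'mask' says which settings changed, rxmode->offloads says
	 * what the new state should be, as in the hunks above.
	 */
	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		rc = hw_vlan_strip_enable(eth_dev, !!(rxmode->offloads &
				RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
	if (rc == 0 && (mask & RTE_ETH_VLAN_FILTER_MASK))
		rc = hw_vlan_filter_enable(eth_dev, !!(rxmode->offloads &
				RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
	return rc;
}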
diff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c
index 698d22e22685..74dc36a17648 100644
--- a/drivers/net/octeontx_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeontx_ep/otx_ep_ethdev.c
@@ -33,14 +33,14 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	otx_epvf = OTX_EP_DEV(eth_dev);
 
-	devinfo->speed_capa = ETH_LINK_SPEED_10G;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
 
 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
 	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
-	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
 
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.c b/drivers/net/octeontx_ep/otx_ep_rxtx.c
index aa4dcd33cc79..9338b30672ec 100644
--- a/drivers/net/octeontx_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeontx_ep/otx_ep_rxtx.c
@@ -563,7 +563,7 @@ otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
@@ -697,7 +697,7 @@ otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)
@@ -954,7 +954,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
 	droq_pkt->l4_len = hdr_lens.l4_len;
 
 	if (droq_pkt->nb_segs > 1 &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index d695c5eef7b0..ec29fd6bc53c 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -136,10 +136,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
@@ -659,7 +659,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -714,7 +714,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
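[Note, not part of the patch: for PMDs like pcap that only report static link data,
an application reads the same fields back with the renamed constants. A minimal
sketch, assuming a valid port_id:]

	struct rte_eth_link link;
	int ret;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret == 0 && link.link_status == RTE_ETH_LINK_UP)
		printf("Port %u: %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");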
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 4cc002ee8fab..047010e15ed0 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -22,15 +22,15 @@ struct pfe_vdev_init_params {
 static struct pfe *g_pfe;
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -601,9 +601,9 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	}
 
 	link.link_status = lstatus;
-	link.link_speed = ETH_LINK_SPEED_1G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_speed = RTE_ETH_LINK_SPEED_1G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	pfe_eth_atomic_write_link_status(dev, &link);
 
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 6667c2d7ab6d..511742c6a1b3 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -65,8 +65,8 @@ typedef u32 offsize_t;      /* In DWORDS !!! */
 struct eth_phy_cfg {
 /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
 	u32 speed;
-#define ETH_SPEED_AUTONEG   0
-#define ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
+#define RTE_ETH_SPEED_AUTONEG   0
+#define RTE_ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
 
 	u32 pause;      /* bitmask */
 #define ETH_PAUSE_NONE		0x0
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 27f6932dc74e..c907d7fd8312 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -342,9 +342,9 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
 	}
 
 	use_tx_offload = !!(tx_offloads &
-			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
-			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
-			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+			    (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
 
 	if (use_tx_offload) {
 		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
@@ -1002,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			(void)qede_vlan_stripping(eth_dev, 1);
 		else
 			(void)qede_vlan_stripping(eth_dev, 0);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN filtering kicks in when a VLAN is added */
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			qede_vlan_filter_set(eth_dev, 0, 1);
 		} else {
 			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1022,7 +1022,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 				 * enabled
 				 */
 				eth_dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_VLAN_FILTER;
+						RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			} else {
 				qede_vlan_filter_set(eth_dev, 0, 0);
 			}
@@ -1069,11 +1069,11 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	/* Configure default RETA */
 	memset(reta_conf, 0, sizeof(reta_conf));
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-		id = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		id = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		q = i % QEDE_RSS_COUNT(eth_dev);
 		reta_conf[id].reta[pos] = q;
 	}
@@ -1112,12 +1112,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure TPA parameters */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (qede_enable_tpa(eth_dev, true))
 			return -EINVAL;
 		/* Enable scatter mode for LRO */
 		if (!eth_dev->data->scattered_rx)
-			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	}
 
 	/* Start queues */
@@ -1132,7 +1132,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	 * Also, we would like to retain similar behavior in PF case, so we
 	 * don't do PF/VF specific check here.
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1272,8 +1272,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* We need to have min 1 RX queue. There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
@@ -1291,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_NOTICE(edev, false,
 			  "Invalid devargs supplied, requested change will not take effect\n");
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+	if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+	      rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
 		DP_ERR(edev, "Unsupported multi-queue mode\n");
 		return -ENOTSUP;
 	}
@@ -1312,7 +1312,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 			return -ENOMEM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 
 	if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1321,8 +1321,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = eth_dev->data->mtu;
 
 	/* Enable VLAN offloads by default */
-	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-					     ETH_VLAN_FILTER_MASK);
+	ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK  |
+					     RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1385,34 +1385,34 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_RX_OFFLOAD_UDP_CKSUM	|
-				     DEV_RX_OFFLOAD_TCP_CKSUM	|
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_LRO	|
-				     DEV_RX_OFFLOAD_KEEP_CRC    |
-				     DEV_RX_OFFLOAD_SCATTER	|
-				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP  |
-				     DEV_RX_OFFLOAD_RSS_HASH);
+	dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO	|
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+				     RTE_ETH_RX_OFFLOAD_SCATTER	|
+				     RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
 	 * to both at port and queue levels.
 	 */
-	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
-				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_TX_OFFLOAD_UDP_CKSUM	|
-				     DEV_TX_OFFLOAD_TCP_CKSUM	|
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_MULTI_SEGS  |
-				     DEV_TX_OFFLOAD_TCP_TSO	|
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT	|
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO	|
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	};
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1424,17 +1424,17 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-		speed_cap |= ETH_LINK_SPEED_1G;
+		speed_cap |= RTE_ETH_LINK_SPEED_1G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-		speed_cap |= ETH_LINK_SPEED_10G;
+		speed_cap |= RTE_ETH_LINK_SPEED_10G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-		speed_cap |= ETH_LINK_SPEED_25G;
+		speed_cap |= RTE_ETH_LINK_SPEED_25G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-		speed_cap |= ETH_LINK_SPEED_40G;
+		speed_cap |= RTE_ETH_LINK_SPEED_40G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-		speed_cap |= ETH_LINK_SPEED_50G;
+		speed_cap |= RTE_ETH_LINK_SPEED_50G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-		speed_cap |= ETH_LINK_SPEED_100G;
+		speed_cap |= RTE_ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
 
 	return 0;
@@ -1461,10 +1461,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	/* Link Mode */
 	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
-		link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case QEDE_DUPLEX_FULL:
-		link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case QEDE_DUPLEX_UNKNOWN:
 	default:
@@ -1473,11 +1473,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	/* AN */
 	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+			     RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
 		link.link_speed, link.link_duplex,
@@ -2012,12 +2012,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
-	if (fc_conf->mode == RTE_FC_FULL)
+	if (fc_conf->mode == RTE_ETH_FC_FULL)
 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
 					QED_LINK_PAUSE_RX_ENABLE);
-	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
 	params.link_up = true;
@@ -2041,13 +2041,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
 					 QED_LINK_PAUSE_TX_ENABLE))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2088,14 +2088,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
 	*rss_caps = 0;
-	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -2221,7 +2221,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	uint8_t entry;
 	int rc = 0;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
 		       reta_size);
 		return -EINVAL;
@@ -2245,8 +2245,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 
 	for_each_hwfn(edev, i) {
 		for (j = 0; j < reta_size; j++) {
-			idx = j / RTE_RETA_GROUP_SIZE;
-			shift = j % RTE_RETA_GROUP_SIZE;
+			idx = j / RTE_ETH_RETA_GROUP_SIZE;
+			shift = j % RTE_ETH_RETA_GROUP_SIZE;
 			if (reta_conf[idx].mask & (1ULL << shift)) {
 				entry = reta_conf[idx].reta[shift];
 				fid = entry * edev->num_hwfns + i;
@@ -2282,15 +2282,15 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	uint16_t i, idx, shift;
 	uint8_t entry;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported\n",
 		       reta_size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift)) {
 			entry = qdev->rss_ind_table[i];
 			reta_conf[idx].reta[shift] = entry;
@@ -2718,16 +2718,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	adapter->ipgre.num_filters = 0;
 	if (is_vf) {
 		adapter->vxlan.enable = true;
-		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
 		adapter->geneve.enable = true;
-		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					      ETH_TUNNEL_FILTER_IVLAN;
+		adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					      RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
 		adapter->ipgre.enable = true;
-		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 	} else {
 		adapter->vxlan.enable = false;
 		adapter->geneve.enable = false;
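[Note, not part of the patch: the RETA hunks above keep the same group arithmetic,
only the group-size macro is renamed. For reference, the application-side
counterpart as a sketch; port_id, reta_size and nb_queues are assumed inputs:]

static int
example_reta_setup(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 conf[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE] = {0};
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		conf[idx].mask |= 1ULL << shift;	/* mark entry valid */
		conf[idx].reta[shift] = i % nb_queues;	/* round-robin spread */
	}
	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}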
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index c756594bfc4b..440440423a32 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -20,97 +20,97 @@ const struct _qede_udp_tunn_types {
 	const char *string;
 } qede_tunn_types[] = {
 	{
-		ETH_TUNNEL_FILTER_OMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC,
 		ECORE_FILTER_MAC,
 		ECORE_TUNN_CLSS_MAC_VLAN,
 		"outer-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_VNI,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_VLAN,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"outer-mac and vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VNI,
 		"vni and inner-mac",
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"vni and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_OIP,
+		RTE_ETH_TUNNEL_FILTER_OIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-IP"
 	},
 	{
-		ETH_TUNNEL_FILTER_IIP,
+		RTE_ETH_TUNNEL_FILTER_IIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"inner-IP"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"OMAC_TENID_IMAC"
@@ -144,7 +144,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
 
 	/* check FDIR modes */
 	switch (fdir->mode) {
@@ -542,7 +542,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -570,7 +570,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 					ECORE_TUNN_CLSS_MAC_VLAN, false);
 
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -622,7 +622,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for VXLAN was already configured\n",
@@ -659,7 +659,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
 		qdev->vxlan.udp_port = udp_port;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for GENEVE was already configured\n",
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index c2263787b4ec..d585db8b61e8 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -249,7 +249,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 	/* cache align the mbuf size to simplify rx_buf_size calculation */
 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)	||
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	||
 	    (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
 		if (!dev->data->scattered_rx) {
 			DP_INFO(edev, "Forcing scatter-gather mode\n");
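[Note, not part of the patch: the qede_filter.c hunks above rename the UDP tunnel
enumerators without changing behaviour. The application-side call looks like this,
as a sketch assuming a valid port_id:]

	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4789,	/* IANA-assigned VXLAN port */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};
	int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
	/* rte_eth_dev_udp_tunnel_port_delete() takes the same struct */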
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index c9334448c887..15112b83f4f7 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -73,14 +73,14 @@
 #define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
 #define QEDE_ETH_MAX_LEN	(RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
 
-#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
-				 ETH_RSS_NONFRAG_IPV4_TCP	|\
-				 ETH_RSS_NONFRAG_IPV4_UDP	|\
-				 ETH_RSS_IPV6			|\
-				 ETH_RSS_NONFRAG_IPV6_TCP	|\
-				 ETH_RSS_NONFRAG_IPV6_UDP	|\
-				 ETH_RSS_VXLAN			|\
-				 ETH_RSS_GENEVE)
+#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4			|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_UDP	|\
+				 RTE_ETH_RSS_IPV6			|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_UDP	|\
+				 RTE_ETH_RSS_VXLAN			|\
+				 RTE_ETH_RSS_GENEVE)
 
 #define QEDE_RXTX_MAX(qdev) \
 	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 0440019e07e1..db10f035dfcb 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -56,10 +56,10 @@ struct pmd_internals {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
@@ -102,7 +102,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -110,21 +110,21 @@ static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	dev->data->dev_started = 0;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_down(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_up(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -163,8 +163,8 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index f79f4d5ffc94..79a27c7703a8 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -105,13 +105,13 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 {
 	uint32_t phy_caps = 0;
 
-	if (~speeds & ETH_LINK_SPEED_FIXED) {
+	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		phy_caps |= (1 << EFX_PHY_CAP_AN);
 		/*
 		 * If no speeds are specified in the mask, any supported
 		 * speed may be negotiated
 		 */
-		if (speeds == ETH_LINK_SPEED_AUTONEG)
+		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 			phy_caps |=
 				(1 << EFX_PHY_CAP_1000FDX) |
 				(1 << EFX_PHY_CAP_10000FDX) |
@@ -120,17 +120,17 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 				(1 << EFX_PHY_CAP_50000FDX) |
 				(1 << EFX_PHY_CAP_100000FDX);
 	}
-	if (speeds & ETH_LINK_SPEED_1G)
+	if (speeds & RTE_ETH_LINK_SPEED_1G)
 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
-	if (speeds & ETH_LINK_SPEED_10G)
+	if (speeds & RTE_ETH_LINK_SPEED_10G)
 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
-	if (speeds & ETH_LINK_SPEED_25G)
+	if (speeds & RTE_ETH_LINK_SPEED_25G)
 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
-	if (speeds & ETH_LINK_SPEED_40G)
+	if (speeds & RTE_ETH_LINK_SPEED_40G)
 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
-	if (speeds & ETH_LINK_SPEED_50G)
+	if (speeds & RTE_ETH_LINK_SPEED_50G)
 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
-	if (speeds & ETH_LINK_SPEED_100G)
+	if (speeds & RTE_ETH_LINK_SPEED_100G)
 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
 
 	return phy_caps;
@@ -400,10 +400,10 @@ sfc_set_fw_subvariant(struct sfc_adapter *sa)
 			tx_offloads |= txq_info->offloads;
 	}
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
 	else
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
@@ -898,7 +898,7 @@ sfc_attach(struct sfc_adapter *sa)
 	sa->priv.shared->tunnel_encaps =
 		encp->enc_tunnel_encapsulations_supported;
 
-	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
 			  encp->enc_tso_v3_enabled;
 		if (!sa->tso)
@@ -907,8 +907,8 @@ sfc_attach(struct sfc_adapter *sa)
 
 	if (sa->tso &&
 	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
-	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
+	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
 		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
 				encp->enc_tso_v3_enabled;
 		if (!sa->tso_encap)
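[Note, not part of the patch: as the sfc.c hunk above shows,
RTE_ETH_LINK_SPEED_AUTONEG (0) and the RTE_ETH_LINK_SPEED_FIXED bit keep their old
semantics. From the application side, a sketch with port_id assumed:]

	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = {0};
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret == 0 && (info.speed_capa & RTE_ETH_LINK_SPEED_25G))
		/* request a fixed 25G link instead of autonegotiation */
		conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
				   RTE_ETH_LINK_SPEED_25G;
	else
		conf.link_speeds = RTE_ETH_LINK_SPEED_AUTONEG;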
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index d958fd642fb1..eeb73a7530ef 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -979,11 +979,11 @@ struct sfc_dp_rx sfc_ef100_rx = {
 				  SFC_DP_RX_FEAT_INTR |
 				  SFC_DP_RX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_RX_OFFLOAD_SCATTER |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_SCATTER |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.get_dev_info		= sfc_ef100_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
 	.qcreate		= sfc_ef100_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index e166fda888b1..67980a587fe4 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -971,16 +971,16 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS |
 				  SFC_DP_TX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_MULTI_SEGS |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 991329e86f01..9ea207cca163 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -746,8 +746,8 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
 				  SFC_DP_RX_FEAT_FLOW_MARK,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.queue_offload_capa	= 0,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
 	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 49a7d4fb42fd..9aaabd30eee6 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -819,10 +819,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ed43adb4ca5c..e7da4608bcb0 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -958,9 +958,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+	if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1125,14 +1125,14 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
@@ -1152,11 +1152,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index de0fac899f77..26973075ef4d 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -105,19 +105,19 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = sa->sriov.num_vfs;
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->max_rx_queues = sa->rxq_max;
 	dev_info->max_tx_queues = sa->txq_max;
@@ -145,8 +145,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
 				    dev_info->tx_queue_offload_capa;
 
-	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->default_txconf.offloads |= txq_offloads_def;
 
@@ -988,16 +988,16 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	switch (link_fc) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case EFX_FCNTL_RESPOND:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case EFX_FCNTL_GENERATE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	default:
 		sfc_err(sa, "%s: unexpected flow control value %#x",
@@ -1028,16 +1028,16 @@ sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		fcntl = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		fcntl = EFX_FCNTL_RESPOND;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		fcntl = EFX_FCNTL_GENERATE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
 		break;
 	default:
@@ -1312,7 +1312,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
-		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		qinfo->scattered_rx = 1;
 	}
 	qinfo->nb_desc = rxq_info->entries;
@@ -1522,9 +1522,9 @@ static efx_tunnel_protocol_t
 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
 {
 	switch (rte_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		return EFX_TUNNEL_PROTOCOL_VXLAN;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		return EFX_TUNNEL_PROTOCOL_GENEVE;
 	default:
 		return EFX_TUNNEL_NPROTOS;
@@ -1651,7 +1651,7 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	/*
 	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
-	 * hence, conversion is done here to derive a correct set of ETH_RSS
+	 * hence, conversion is done here to derive a correct set of RTE_ETH_RSS
 	 * flags which corresponds to the active EFX configuration stored
 	 * locally in 'sfc_adapter' and kept up-to-date
 	 */
@@ -1777,8 +1777,8 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp = entry / RTE_RETA_GROUP_SIZE;
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 
 		if ((reta_conf[grp].mask >> grp_idx) & 1)
 			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
@@ -1827,10 +1827,10 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 		struct rte_eth_rss_reta_entry64 *grp;
 
-		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+		grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
 
 		if (grp->mask & (1ull << grp_idx)) {
 			if (grp->reta[grp_idx] >= rss->channels) {
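[Note, not part of the patch: the RTE_ETH_FC_* renames above are a straight 1:1
mapping. Read-modify-write from the application side, as a sketch with port_id
assumed:]

	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret == 0 && fc_conf.mode != RTE_ETH_FC_FULL) {
		fc_conf.mode = RTE_ETH_FC_FULL;	/* respond to and generate pause */
		ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}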
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 81b9923644aa..23399fcab252 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -391,7 +391,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vlan *spec = NULL;
 	const struct rte_flow_item_vlan *mask = NULL;
 	const struct rte_flow_item_vlan supp_mask = {
-		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
 		.inner_type = RTE_BE16(0xffff),
 	};
 
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index 5320d8903dac..27b02b1119fb 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -573,66 +573,66 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
 
 	memset(link_info, 0, sizeof(*link_info));
 	if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
-		link_info->link_status = ETH_LINK_DOWN;
+		link_info->link_status = RTE_ETH_LINK_DOWN;
 	else
-		link_info->link_status = ETH_LINK_UP;
+		link_info->link_status = RTE_ETH_LINK_UP;
 
 	switch (link_mode) {
 	case EFX_LINK_10HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_10FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_100FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_1000HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_1000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_10000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_25000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_25G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_25G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_40000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_40G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_40G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_50000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_50G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_50G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	default:
 		SFC_ASSERT(B_FALSE);
 		/* FALLTHROUGH */
 	case EFX_LINK_UNKNOWN:
 	case EFX_LINK_DOWN:
-		link_info->link_speed  = ETH_SPEED_NUM_NONE;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_NONE;
 		link_info->link_duplex = 0;
 		break;
 	}
 
-	link_info->link_autoneg = ETH_LINK_AUTONEG;
+	link_info->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 int
diff --git a/drivers/net/sfc/sfc_repr.c b/drivers/net/sfc/sfc_repr.c
index 2500b14cb006..9d88d554c1ba 100644
--- a/drivers/net/sfc/sfc_repr.c
+++ b/drivers/net/sfc/sfc_repr.c
@@ -405,7 +405,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 	}
 
 	switch (conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (nb_rx_queues != 1) {
 			sfcr_err(sr, "Rx RSS is not supported with %u queues",
 				 nb_rx_queues);
@@ -420,7 +420,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 			ret = -EINVAL;
 		}
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		break;
 	default:
 		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
@@ -428,7 +428,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 		break;
 	}
 
-	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
+	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
 		sfcr_err(sr, "Tx mode MQ modes not supported");
 		ret = -EINVAL;
 	}
@@ -553,8 +553,8 @@ sfc_repr_dev_link_update(struct rte_eth_dev *dev,
 		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
 	} else {
 		memset(&link, 0, sizeof(link));
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c60ef17a922a..23df27c8f45a 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -648,9 +648,9 @@ struct sfc_dp_rx sfc_efx_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
 	},
 	.features		= SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
 	.qcreate		= sfc_efx_rx_qcreate,
 	.qdestroy		= sfc_efx_rx_qdestroy,
@@ -931,7 +931,7 @@ sfc_rx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (encp->enc_tunnel_encapsulations_supported == 0)
-		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	return ~no_caps;
 }
@@ -1140,7 +1140,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 
 	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
 				  encp->enc_rx_prefix_size,
-				  (offloads & DEV_RX_OFFLOAD_SCATTER),
+				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
 				  encp->enc_rx_scatter_max,
 				  &error)) {
 		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
@@ -1166,15 +1166,15 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags |=
-		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
 	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
-	     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
 
-	if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
 
 	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
@@ -1211,7 +1211,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	rxq_info->refill_mb_pool = mb_pool;
 
 	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
-	    (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
 	else
 		rxq_info->rxq_flags = 0;
@@ -1313,19 +1313,19 @@ sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
  * Mapping between RTE RSS hash functions and their EFX counterparts.
  */
 static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
-	{ ETH_RSS_NONFRAG_IPV4_TCP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
 	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
 	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
-	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV4, 2TUPLE) },
-	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
-	  ETH_RSS_IPV6_EX,
+	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+	  RTE_ETH_RSS_IPV6_EX,
 	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV6, 2TUPLE) }
 };
@@ -1645,10 +1645,10 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	int rc = 0;
 
 	switch (rxmode->mq_mode) {
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/* No special checks are required */
 		break;
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
 			sfc_err(sa, "RSS is not available");
 			rc = EINVAL;
@@ -1665,16 +1665,16 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	 * so unsupported offloads cannot be added as the result of
 	 * below check.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
-	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 	}
 
 	return rc;
@@ -1820,7 +1820,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	}
 
 configure_rss:
-	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
 			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
 	if (rss->channels > 0) {
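[Note, not part of the patch: the sfc_rss_hf_map table above translates the renamed
RTE_ETH_RSS_* bits into EFX hash tuples. An application selects the same bits
through rte_eth_rss_conf, as a sketch with port_id assumed:]

	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the driver's default key */
		.rss_hf = RTE_ETH_RSS_IPV4 |
			  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	};
	int ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);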
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 13392cdd5a09..0273788c20ce 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -54,23 +54,23 @@ sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (!encp->enc_hw_tx_insert_vlan_enabled)
-		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (!encp->enc_tunnel_encapsulations_supported)
-		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	if (!sa->tso)
-		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	return ~no_caps;
 }
@@ -114,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -309,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;
 
 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -529,23 +529,23 @@ sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 	if (rc != 0)
 		goto fail_ev_qstart;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_IPV4;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -876,9 +876,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/*
 		 * Here VLAN TCI is expected to be zero if no
-		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
@@ -1242,13 +1242,13 @@ struct sfc_dp_tx sfc_efx_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
 	},
 	.features		= 0,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO,
 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b3b55b9035b1..3ef33818a9e0 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -173,7 +173,7 @@ pmd_dev_start(struct rte_eth_dev *dev)
 		return status;
 
 	/* Link UP */
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -184,7 +184,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
 	struct pmd_internals *p = dev->data->dev_private;
 
 	/* Link DOWN */
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	/* Firmware */
 	softnic_pipeline_disable_all(p);
@@ -386,10 +386,10 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
 
 	/* dev->data */
 	dev->data->dev_private = dev_private;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	dev->data->mac_addrs = &eth_addr;
 	dev->data->promiscuous = 1;
 	dev->data->numa_node = params->cpu_id;
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 3c6a285e3c5e..6a084e3e1b1b 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1042,7 +1042,7 @@ static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_dev_data *data = dev->data;
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
 		data->scattered_rx = 1;
 	} else {
@@ -1064,11 +1064,11 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = internals->max_rx_queues;
 	dev_info->max_tx_queues = internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = 0;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->tx_queue_offload_capa = 0;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1202,10 +1202,10 @@ eth_link_update(struct rte_eth_dev *dev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_speed = ETH_SPEED_NUM_100G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_status = ETH_LINK_UP;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	rte_eth_linkstatus_set(dev, &link);
 	return 0;
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index e4f1ad45219e..5d5350d78e03 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -70,16 +70,16 @@
 
 #define TAP_IOV_DEFAULT_MAX 1024
 
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER |	\
-			DEV_RX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_RX_OFFLOAD_UDP_CKSUM |	\
-			DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER |	\
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS |	\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_TX_OFFLOAD_UDP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |	\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 static int tap_devices_count;
 
@@ -97,10 +97,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;	/* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -433,7 +433,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		len = readv(process_private->rxq_fds[rxq->queue_id],
 			*rxq->iovecs,
-			1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+			1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
 			     rxq->nb_rx_desc : 1));
 		if (len < (int)sizeof(struct tun_pi))
 			break;
@@ -489,7 +489,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		seg->next = NULL;
 		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
 						      RTE_PTYPE_ALL_MASK);
-		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 			tap_verify_csum(mbuf);
 
 		/* account for the receive frame */
@@ -866,7 +866,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -876,7 +876,7 @@ tap_link_set_up(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
@@ -956,30 +956,30 @@ tap_dev_speed_capa(void)
 	uint32_t speed = pmd_link.link_speed;
 	uint32_t capa = 0;
 
-	if (speed >= ETH_SPEED_NUM_10M)
-		capa |= ETH_LINK_SPEED_10M;
-	if (speed >= ETH_SPEED_NUM_100M)
-		capa |= ETH_LINK_SPEED_100M;
-	if (speed >= ETH_SPEED_NUM_1G)
-		capa |= ETH_LINK_SPEED_1G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_2_5G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_5G;
-	if (speed >= ETH_SPEED_NUM_10G)
-		capa |= ETH_LINK_SPEED_10G;
-	if (speed >= ETH_SPEED_NUM_20G)
-		capa |= ETH_LINK_SPEED_20G;
-	if (speed >= ETH_SPEED_NUM_25G)
-		capa |= ETH_LINK_SPEED_25G;
-	if (speed >= ETH_SPEED_NUM_40G)
-		capa |= ETH_LINK_SPEED_40G;
-	if (speed >= ETH_SPEED_NUM_50G)
-		capa |= ETH_LINK_SPEED_50G;
-	if (speed >= ETH_SPEED_NUM_56G)
-		capa |= ETH_LINK_SPEED_56G;
-	if (speed >= ETH_SPEED_NUM_100G)
-		capa |= ETH_LINK_SPEED_100G;
+	if (speed >= RTE_ETH_SPEED_NUM_10M)
+		capa |= RTE_ETH_LINK_SPEED_10M;
+	if (speed >= RTE_ETH_SPEED_NUM_100M)
+		capa |= RTE_ETH_LINK_SPEED_100M;
+	if (speed >= RTE_ETH_SPEED_NUM_1G)
+		capa |= RTE_ETH_LINK_SPEED_1G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_2_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_10G)
+		capa |= RTE_ETH_LINK_SPEED_10G;
+	if (speed >= RTE_ETH_SPEED_NUM_20G)
+		capa |= RTE_ETH_LINK_SPEED_20G;
+	if (speed >= RTE_ETH_SPEED_NUM_25G)
+		capa |= RTE_ETH_LINK_SPEED_25G;
+	if (speed >= RTE_ETH_SPEED_NUM_40G)
+		capa |= RTE_ETH_LINK_SPEED_40G;
+	if (speed >= RTE_ETH_SPEED_NUM_50G)
+		capa |= RTE_ETH_LINK_SPEED_50G;
+	if (speed >= RTE_ETH_SPEED_NUM_56G)
+		capa |= RTE_ETH_LINK_SPEED_56G;
+	if (speed >= RTE_ETH_SPEED_NUM_100G)
+		capa |= RTE_ETH_LINK_SPEED_100G;
 
 	return capa;
 }
@@ -1196,15 +1196,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
 		if (!(ifr.ifr_flags & IFF_UP) ||
 		    !(ifr.ifr_flags & IFF_RUNNING)) {
-			dev_link->link_status = ETH_LINK_DOWN;
+			dev_link->link_status = RTE_ETH_LINK_DOWN;
 			return 0;
 		}
 	}
 	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
 	dev_link->link_status =
 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-		 ETH_LINK_UP :
-		 ETH_LINK_DOWN);
+		 RTE_ETH_LINK_UP :
+		 RTE_ETH_LINK_DOWN);
 	return 0;
 }
 
@@ -1391,7 +1391,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 	int ret;
 
 	/* initialize GSO context */
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (!pmd->gso_ctx_mp) {
 		/*
 		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1606,9 +1606,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->csum = !!(offloads &
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM));
+			(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
@@ -1760,7 +1760,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	fc_conf->mode = RTE_FC_NONE;
+	fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1768,7 +1768,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	if (fc_conf->mode != RTE_FC_NONE)
+	if (fc_conf->mode != RTE_ETH_FC_NONE)
 		return -ENOTSUP;
 	return 0;
 }
@@ -2262,7 +2262,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
 			}
 		}
 	}
-	pmd_link.link_speed = ETH_SPEED_NUM_10G;
+	pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
 	TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2436,7 +2436,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 		return 0;
 	}
 
-	speed = ETH_SPEED_NUM_10G;
+	speed = RTE_ETH_SPEED_NUM_10G;
 
 	/* use tap%d which causes kernel to choose next available */
 	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 176e7180bdaa..48c151cf6b68 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -13,7 +13,7 @@
 #define TAP_RSS_HASH_KEY_SIZE 40
 
 /* Supported RSS */
-#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define TAP_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))
 
 /* hashed fields for RSS */
 enum hash_field {
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 328d6d56d921..38a2ddc633b5 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -61,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	if (nic->duplex == NICVF_HALF_DUPLEX)
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -134,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
 			nicvf_link_status_update(nic, &link);
-			if (link.link_status == ETH_LINK_UP)
+			if (link.link_status == RTE_ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
@@ -390,35 +390,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
 	uint64_t nic_rss = 0;
 
-	if (ethdev_rss & ETH_RSS_IPV4)
+	if (ethdev_rss & RTE_ETH_RSS_IPV4)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_IPV6)
+	if (ethdev_rss & RTE_ETH_RSS_IPV6)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-		if (ethdev_rss & ETH_RSS_VXLAN)
+		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 			nic_rss |= RSS_TUN_VXLAN_ENA;
 
-		if (ethdev_rss & ETH_RSS_GENEVE)
+		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 			nic_rss |= RSS_TUN_GENEVE_ENA;
 
-		if (ethdev_rss & ETH_RSS_NVGRE)
+		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 			nic_rss |= RSS_TUN_NVGRE_ENA;
 	}
 
@@ -431,28 +431,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
 	uint64_t ethdev_rss = 0;
 
 	if (nic_rss & RSS_IP_ENA)
-		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-				ETH_RSS_NONFRAG_IPV6_TCP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-				ETH_RSS_NONFRAG_IPV6_UDP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-		ethdev_rss |= ETH_RSS_PORT;
+		ethdev_rss |= RTE_ETH_RSS_PORT;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
 		if (nic_rss & RSS_TUN_VXLAN_ENA)
-			ethdev_rss |= ETH_RSS_VXLAN;
+			ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
 		if (nic_rss & RSS_TUN_GENEVE_ENA)
-			ethdev_rss |= ETH_RSS_GENEVE;
+			ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
 		if (nic_rss & RSS_TUN_NVGRE_ENA)
-			ethdev_rss |= ETH_RSS_NVGRE;
+			ethdev_rss |= RTE_ETH_RSS_NVGRE;
 	}
 	return ethdev_rss;
 }
@@ -479,8 +479,8 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = tbl[j];
 	}
@@ -509,8 +509,8 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				tbl[j] = reta_conf[i].reta[j];
 	}
@@ -807,9 +807,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
 		    dev->data->nb_rx_queues,
 		    dev->data->dev_conf.lpbk_mode, rsshf);
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		ret = nicvf_rss_term(nic);
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
 	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -870,7 +870,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -992,7 +992,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->offloads = offloads;
 
-	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1382,11 +1382,11 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+				 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
@@ -1415,10 +1415,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
-			DEV_TX_OFFLOAD_UDP_CKSUM          |
-			DEV_TX_OFFLOAD_TCP_CKSUM,
+		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM          |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 	};
 
 	return 0;
@@ -1582,8 +1582,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1711,7 +1711,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	/* Setup scatter mode if needed by jumbo */
 	if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU */
@@ -1896,8 +1896,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!rte_eal_has_hugepages()) {
 		PMD_INIT_LOG(INFO, "Huge page is not configured");
@@ -1909,8 +1909,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
@@ -1920,7 +1920,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -1955,7 +1955,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic->offload_cksum = 1;
 
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -2032,8 +2032,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			nicvf_vlan_hw_strip(nic, true);
 		else
 			nicvf_vlan_hw_strip(nic, false);
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 5d38750d6313..cb474e26b81e 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -16,32 +16,32 @@
 #define NICVF_UNKNOWN_DUPLEX		0xff
 
 #define NICVF_RSS_OFFLOAD_PASS1 ( \
-	ETH_RSS_PORT | \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_PORT | \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define NICVF_RSS_OFFLOAD_TUNNEL ( \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
 
 #define NICVF_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
-	DEV_TX_OFFLOAD_UDP_CKSUM        | \
-	DEV_TX_OFFLOAD_TCP_CKSUM        | \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define NICVF_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM    | \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM    | \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 7b46ffb68635..0b0f9db7cb2a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -998,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 			!(rxcfg & TXGBE_RXCFG_VLAN);
 		rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1033,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (vlan_ext) {
 			wr32m(hw, TXGBE_VLANCTL,
 				TXGBE_VLANCTL_TPID_MASK,
@@ -1053,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				TXGBE_TAGTPID_LSB(tpid));
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (vlan_ext) {
 			/* Only the high 16-bits is valid */
 			wr32m(hw, TXGBE_EXTAG,
@@ -1138,10 +1138,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -1240,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			txgbe_vlan_strip_queue_set(dev, i, 1);
 		else
 			txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1254,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct txgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -1275,25 +1275,25 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		txgbe_vlan_hw_strip_config(dev);
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			txgbe_vlan_hw_filter_enable(dev);
 		else
 			txgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			txgbe_vlan_hw_extend_enable(dev);
 		else
 			txgbe_vlan_hw_extend_disable(dev);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			txgbe_qinq_hw_strip_enable(dev);
 		else
 			txgbe_qinq_hw_strip_disable(dev);
@@ -1331,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -1357,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1378,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode =
-				ETH_MQ_RX_VMDQ_ONLY;
+				RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -1393,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
 			dev->data->dev_conf.txmode.mq_mode =
-				ETH_MQ_TX_VMDQ_ONLY;
+				RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -1414,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdb+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1429,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1446,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -1495,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = txgbe_check_mq_mode(dev);
@@ -1694,15 +1694,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		txgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1763,8 +1763,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	if (err)
 		goto error;
 
-	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
@@ -1773,20 +1773,20 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
 	}
 
@@ -2601,7 +2601,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2634,11 +2634,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_desc_lim = tx_desc_lim;
 
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -2713,8 +2713,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -2733,34 +2733,34 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	}
 
 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case TXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case TXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -2990,7 +2990,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3221,13 +3221,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3359,16 +3359,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3400,16 +3400,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3576,12 +3576,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
 		}
@@ -3605,15 +3605,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= TXGBE_POOLETHCTL_UTA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= TXGBE_POOLETHCTL_MCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= TXGBE_POOLETHCTL_UCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= TXGBE_POOLETHCTL_BCA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= TXGBE_POOLETHCTL_MCP;
 
 	return new_val;
@@ -4264,15 +4264,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = TXGBE_INCVAL_100;
 		shift = TXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = TXGBE_INCVAL_1GB;
 		shift = TXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = TXGBE_INCVAL_10GB;
 		shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4628,7 +4628,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -4639,7 +4639,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -4663,9 +4663,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4678,7 +4678,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4908,7 +4908,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -4939,7 +4939,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -4979,7 +4979,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4987,7 +4987,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4995,7 +4995,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5003,7 +5003,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5035,7 +5035,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5045,7 +5045,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5055,7 +5055,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5065,7 +5065,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index fd65d89ffe7d..8304b68292da 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -60,15 +60,15 @@
 #define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
 #define TXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 43dc0ed39b75..283b52e8f3db 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -486,14 +486,14 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -574,22 +574,22 @@ txgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -647,8 +647,8 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
 	txgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -891,10 +891,10 @@ txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			txgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 8abb86228608..e303d87176ed 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -102,22 +102,22 @@ txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf,
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
 		     uint32_t *fdirctrl, uint32_t *flex)
 {
 	*fdirctrl = 0;
 	*flex = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
 		break;
@@ -521,15 +521,15 @@ txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
@@ -564,15 +564,15 @@ txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_signature_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index eae400b14176..6d7fd1842843 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1215,7 +1215,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index ccd747973ba2..445733f3ba46 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -372,7 +372,7 @@ txgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -380,7 +380,7 @@ txgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -611,11 +611,11 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -634,7 +634,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= TXGBE_SECRXCTL_CRCSTRIP;
 	wr32(hw, TXGBE_SECRXCTL, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
 		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
 		if (reg != 0) {
@@ -642,7 +642,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
 		reg = rd32(hw, TXGBE_SECTXCTL);
 		if (reg != TXGBE_SECTXCTL_STFWD) {
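
For clarity, a minimal application-side sketch of requesting inline IPsec with
the renamed flags (not part of this patch; the helper name is hypothetical).
txgbe_crypto_enable_ipsec() above rejects this setup when TCP_LRO is also
requested or CRC stripping is disabled:

	#include <rte_ethdev.h>

	static int
	enable_inline_ipsec(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_conf conf = { 0 };

		conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_SECURITY;
		conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_SECURITY;
		/* Must not add RTE_ETH_RX_OFFLOAD_TCP_LRO or
		 * RTE_ETH_RX_OFFLOAD_KEEP_CRC; the sanity checks above
		 * fail either combination. */
		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	}
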
diff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c
index a48972b1a381..30be2873307a 100644
--- a/drivers/net/txgbe/txgbe_pf.c
+++ b/drivers/net/txgbe/txgbe_pf.c
@@ -101,15 +101,15 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct txgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -256,13 +256,13 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	}
@@ -611,29 +611,29 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &eth_dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = TXGBE_DEV_HW(eth_dev);
 		vmvir = rd32(hw, TXGBE_POOLTAG(vf));
 		vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
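
The pool sizing in txgbe_pf_host_init() above relies on RTE_ETH_16/32/64_POOLS
keeping their numeric values (16/32/64). A worked example (sketch, not part of
the patch):

	/* vf_num = 20: 20 >= RTE_ETH_16_POOLS but < RTE_ETH_32_POOLS, so the
	 * PF activates RTE_ETH_32_POOLS with nb_q_per_pool = 4; 48 VFs would
	 * select RTE_ETH_64_POOLS with 2 queues per pool. */
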
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 7e18dcce0a86..1204dc5499a5 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1960,7 +1960,7 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
-	return DEV_RX_OFFLOAD_VLAN_STRIP;
+	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 }
 
 uint64_t
@@ -1970,34 +1970,34 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_RSS_HASH |
-		   DEV_RX_OFFLOAD_SCATTER;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		   RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	if (!txgbe_is_vf(dev))
-		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-			     DEV_RX_OFFLOAD_QINQ_STRIP |
-			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+		offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 
 	/*
 	 * RSC is only supported by PF devices in a non-SR-IOV
 	 * mode.
 	 */
 	if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == txgbe_mac_raptor)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
-	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -2222,32 +2222,32 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_UDP_TSO	   |
-		DEV_TX_OFFLOAD_UDP_TNL_TSO	|
-		DEV_TX_OFFLOAD_IP_TNL_TSO	|
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO	|
-		DEV_TX_OFFLOAD_GRE_TNL_TSO	|
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO	|
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO	|
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_UDP_TSO |
+		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (!txgbe_is_vf(dev))
-		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2349,7 +2349,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/* Modification to set tail pointer for virtual function
@@ -2599,7 +2599,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2900,20 +2900,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2930,20 +2930,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		mrqc &= ~TXGBE_RACTL_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_RACTL_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_RACTL_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_RACTL_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2984,39 +2984,39 @@ txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
 			rss_hf = 0;
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		if (mrqc & TXGBE_RACTL_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_RACTL_RSSENA))
 			rss_hf = 0;
 	}
@@ -3046,7 +3046,7 @@ txgbe_rss_configure(struct rte_eth_dev *dev)
 	 */
 	if (adapter->rss_reta_updated == 0) {
 		reta = 0;
-		for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 			if (j == dev->data->nb_rx_queues)
 				j = 0;
 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
@@ -3083,12 +3083,12 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		txgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * split rx buffer up into sections, each for 1 traffic class
@@ -3103,7 +3103,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
 
 		rxpbsize &= (~(0x3FF << 10));
@@ -3111,7 +3111,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 
-	if (num_pools == ETH_16_POOLS) {
+	if (num_pools == RTE_ETH_16_POOLS) {
 		mrqc = TXGBE_PORTCTL_NUMTC_8;
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 	} else {
@@ -3130,7 +3130,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	wr32(hw, TXGBE_POOLCTL, vt_ctl);
 
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3151,7 +3151,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_POOLRXENA(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_ETHADDRIDX, 0);
 	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
@@ -3221,7 +3221,7 @@ txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	/*PF VF Transmit Enable*/
 	wr32(hw, TXGBE_POOLTXENA(0),
 		vmdq_tx_conf->nb_queue_pools ==
-				ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+				RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	txgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3237,12 +3237,12 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3252,7 +3252,7 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3270,12 +3270,12 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3285,7 +3285,7 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3312,7 +3312,7 @@ txgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3339,7 +3339,7 @@ txgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3475,7 +3475,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/*
@@ -3486,8 +3486,8 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/*Configure general VMDQ and DCB RX parameters*/
 		txgbe_vmdq_dcb_configure(dev);
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -3500,7 +3500,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -3511,7 +3511,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -3527,15 +3527,15 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
-			if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -3576,7 +3576,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			wr32(hw, TXGBE_PBRXSIZE(i), 0);
 	}
 	if (config_dcb_tx) {
@@ -3592,7 +3592,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			wr32(hw, TXGBE_PBTXSIZE(i), 0);
 			wr32(hw, TXGBE_PBTXDMATH(i), 0);
 		}
@@ -3634,7 +3634,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/* If the TC count is 8,
@@ -3648,7 +3648,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = txgbe_dcb_pfc_enabled;
 		}
 		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -3719,12 +3719,12 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -3780,7 +3780,7 @@ txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* pool enabling for receive - 64 */
 	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
 
 	/*
@@ -3904,11 +3904,11 @@ txgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
@@ -3931,15 +3931,15 @@ txgbe_config_vf_default(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	default:
@@ -3962,21 +3962,21 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			txgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			txgbe_rss_disable(dev);
@@ -3987,18 +3987,18 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4028,7 +4028,7 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			txgbe_vmdq_tx_hw_configure(hw);
 		else
 			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
@@ -4038,13 +4038,13 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_64;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_32;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_16;
 			break;
 		default:
@@ -4107,10 +4107,10 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4118,22 +4118,22 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
 				    "is disabled");
 		return -EINVAL;
 	}
 
 	rfctl = rd32(hw, TXGBE_PSRCTL);
-	if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~TXGBE_PSRCTL_RSCDIA;
 	else
 		rfctl |= TXGBE_PSRCTL_RSCDIA;
 	wr32(hw, TXGBE_PSRCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set PSRCTL.RSCACK bit */
@@ -4273,7 +4273,7 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 	}
 #endif
 }
@@ -4316,7 +4316,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
 	else
 		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4344,7 +4344,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4354,7 +4354,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -4391,11 +4391,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4410,7 +4410,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = rd32(hw, TXGBE_PSRCTL);
 	rxcsum |= TXGBE_PSRCTL_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= TXGBE_PSRCTL_L4CSUM;
 	else
 		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
@@ -4419,7 +4419,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	if (hw->mac.type == txgbe_mac_raptor) {
 		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4542,8 +4542,8 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 		txgbe_setup_loopback_link_raptor(hw);
 
 #ifdef RTE_LIB_SECURITY
-	if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
-	    (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
+	    (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = txgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -4851,7 +4851,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Set PSR type for VF RSS according to max Rx queue */
 	psrtype = TXGBE_VFPLCFG_PSRL4HDR |
@@ -4903,7 +4903,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		 */
 		wr32(hw, TXGBE_RXCFG(i), srrctl);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -4912,8 +4912,8 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/*
@@ -5084,7 +5084,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 		if (j == conf->conf.queue_num)
 			j = 0;
 		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
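
A usage sketch for the RSS path above (not part of this patch; the helper name
is hypothetical): the RTE_ETH_RSS_* types requested here are what
txgbe_dev_rss_hash_update() translates into TXGBE_RACTL/TXGBE_VFPLCFG bits:

	#include <rte_ethdev.h>

	static int
	set_ipv4_rss(uint16_t port_id)
	{
		struct rte_eth_rss_conf rss_conf = {
			.rss_key = NULL,	/* keep the configured key */
			.rss_hf = RTE_ETH_RSS_IPV4 |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		};

		return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
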
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b96f58a3f848..27d4c842c0e7 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -309,7 +309,7 @@ struct txgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -392,7 +392,7 @@ struct txgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t            offloads; /* Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 3abe3959eb1a..3171be73d05d 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -118,14 +118,14 @@ txgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -364,10 +364,10 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -381,7 +381,7 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
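
Worked example for txgbe_queue_base_nb_get() above (sketch, not part of the
patch): with nb_tcs == 1 and vf_num = 20, the RTE_ETH_16_POOLS branch applies,
so each VF owns 4 queues and the PF queue range starts at 20 * 4 = 80.
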
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a7935a716de9..27f81a5cafc5 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -125,8 +125,8 @@ static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static struct rte_eth_link pmd_link = {
 		.link_speed = 10000,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN
 };
 
 struct rte_vhost_vring_state {
@@ -823,7 +823,7 @@ new_device(int vid)
 
 	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);
@@ -858,7 +858,7 @@ destroy_device(int vid)
 	rte_atomic32_set(&internal->dev_attached, 0);
 	update_queuing_status(eth_dev);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1124,7 +1124,7 @@ eth_dev_configure(struct rte_eth_dev *dev)
 	if (vhost_driver_setup(dev) < 0)
 		return -1;
 
-	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -1273,9 +1273,9 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = internal->max_queues;
 	dev_info->min_rx_bufsize = 0;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
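
A reader-side sketch using the renamed link constants set by this PMD (not
part of this patch; the helper name is hypothetical):

	#include <rte_ethdev.h>

	static int
	port_is_up_full_duplex(uint16_t port_id)
	{
		struct rte_eth_link link;

		if (rte_eth_link_get_nowait(port_id, &link) < 0)
			return 0;
		return link.link_status == RTE_ETH_LINK_UP &&
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX;
	}
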
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 047d3f43a3cf..74ede2aeccc1 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -712,7 +712,7 @@ int
 virtio_dev_close(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1771,7 +1771,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
-	if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+	if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
 		if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
 			config = &local_config;
 			virtio_read_dev_config(hw,
@@ -1785,7 +1785,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		}
 	}
 	if (hw->duplex == DUPLEX_UNKNOWN)
-		hw->duplex = ETH_LINK_FULL_DUPLEX;
+		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
 		hw->speed, hw->duplex);
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
@@ -1884,7 +1884,7 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	int vectorized = 0;
 	int ret;
 
@@ -1955,22 +1955,22 @@ static uint32_t
 virtio_dev_speed_capa_get(uint32_t speed)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
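
(The mapping above is one-to-one: for example, RTE_ETH_SPEED_NUM_25G yields
RTE_ETH_LINK_SPEED_25G, while any unlisted speed, including
RTE_ETH_SPEED_NUM_UNKNOWN, yields a capability of 0.)
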
@@ -2086,14 +2086,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "configure");
 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Rx multi queue mode %d",
 			rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Tx multi queue mode %d",
 			txmode->mq_mode);
@@ -2111,20 +2111,20 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
@@ -2136,15 +2136,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
-	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
 		PMD_DRV_LOG(ERR,
 			"rx checksum not available on this host");
 		return -ENOTSUP;
 	}
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
 		PMD_DRV_LOG(ERR,
@@ -2156,12 +2156,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
 		virtio_dev_cq_start(dev);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		hw->vlan_strip = 1;
 
-	hw->rx_ol_scatter = (rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 		PMD_DRV_LOG(ERR,
 			    "vlan filtering not available on this host");
@@ -2214,7 +2214,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 				PMD_DRV_LOG(INFO,
 					"disabled packed ring vectorized rx for TCP_LRO enabled");
 				hw->use_vec_rx = 0;
@@ -2241,10 +2241,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_LRO |
-					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
+					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
 				PMD_DRV_LOG(INFO,
 					"disabled split ring vectorized rx for offloading enabled");
 				hw->use_vec_rx = 0;
@@ -2437,7 +2437,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct rte_eth_link link;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 	dev->data->dev_started = 0;
@@ -2478,28 +2478,28 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 	memset(&link, 0, sizeof(link));
 	link.link_duplex = hw->duplex;
 	link.link_speed  = hw->speed;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (!hw->started) {
-		link.link_status = ETH_LINK_DOWN;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
 		virtio_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
 				&status, sizeof(status));
 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
-			link.link_status = ETH_LINK_DOWN;
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			PMD_INIT_LOG(DEBUG, "Port %d is down",
 				     dev->data->port_id);
 		} else {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			PMD_INIT_LOG(DEBUG, "Port %d is up",
 				     dev->data->port_id);
 		}
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2512,8 +2512,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct virtio_hw *hw = dev->data->dev_private;
 	uint64_t offloads = rxmode->offloads;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 
 			PMD_DRV_LOG(NOTICE,
@@ -2523,8 +2523,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK)
-		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
+		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -2546,32 +2546,32 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = hw->max_mtu;
 
 	host_features = VIRTIO_OPS(hw)->get_features(hw);
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 	}
 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 	}
 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
 		/*
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index a19895af1f17..26d9edf5319c 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -41,20 +41,20 @@
 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
 
 #define VMXNET3_TX_OFFLOAD_CAP		\
-	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
-	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_TX_OFFLOAD_TCP_TSO |	\
-	 DEV_TX_OFFLOAD_MULTI_SEGS)
+	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define VMXNET3_RX_OFFLOAD_CAP		\
-	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
-	 DEV_RX_OFFLOAD_VLAN_FILTER |   \
-	 DEV_RX_OFFLOAD_SCATTER |	\
-	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_LRO |	\
-	 DEV_RX_OFFLOAD_RSS_HASH)
+	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+	 RTE_ETH_RX_OFFLOAD_SCATTER | \
+	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 int vmxnet3_segs_dynfield_offset = -1;
 
@@ -398,9 +398,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* set the initial link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(eth_dev, &link);
 
 	return 0;
@@ -486,8 +486,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
@@ -547,7 +547,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	hw->queueDescPA = mz->iova;
 	hw->queue_desc_len = (uint16_t)size;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Allocate memory structure for UPT1_RSSConf and configure */
 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
 				      "rss_conf", rte_socket_id(),
@@ -843,15 +843,15 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
 		devRead->misc.maxNumRxSG = 0;
 	}
 
-	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		ret = vmxnet3_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS)
 			return ret;
@@ -863,7 +863,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	}
 
 	ret = vmxnet3_dev_vlan_offload_set(dev,
-			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -930,7 +930,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 	}
 
 	if (VMXNET3_VERSION_GE_4(hw) &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Check for additional RSS  */
 		ret = vmxnet3_v4_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS) {
@@ -1039,9 +1039,9 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(dev, &link);
 
 	hw->adapter_stopped = 1;
@@ -1365,7 +1365,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
 	dev_info->min_mtu = VMXNET3_MIN_MTU;
 	dev_info->max_mtu = VMXNET3_MAX_MTU;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -1447,10 +1447,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (ret & 0x1)
-		link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+		link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1503,7 +1503,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1573,8 +1573,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1583,8 +1583,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 				       VMXNET3_CMD_UPDATE_FEATURE);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 8950175460f0..ef858ac9512f 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -32,18 +32,18 @@
 				VMXNET3_MAX_RX_QUEUES + 1)
 
 #define VMXNET3_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 #define VMXNET3_V4_RSS_MASK ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define VMXNET3_MANDATORY_V4_RSS ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 /* RSS configuration structure - shared with device through GPA */
 typedef struct VMXNET3_RSSConf {
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index b01c4c01f9c9..870100fa4f11 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1326,13 +1326,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	rss_hf = port_rss_conf->rss_hf &
 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1389,13 +1389,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
 	/* loading hashType */
 	dev_rss_conf->hashType = 0;
 	rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
 	return VMXNET3_SUCCESS;
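
A configuration sketch based on the masks above (not part of this patch):
requesting the v4 UDP RSS extension while keeping the types named by
VMXNET3_MANDATORY_V4_RSS set:

	#include <rte_ethdev.h>

	struct rte_eth_rss_conf rss_conf = {
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP |  /* VMXNET3_V4_RSS_MASK */
			  RTE_ETH_RSS_NONFRAG_IPV4_TCP |  /* mandatory for v4 */
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP,   /* mandatory for v4 */
	};
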
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 68e3c13730ad..a9fef2297842 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -71,11 +71,11 @@ mbuf_input(struct rte_mbuf *mbuf)
 
 static const struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -328,7 +328,7 @@ check_port_link_status(uint16_t port_id)
 
 		if (link_get_err >= 0 && link.link_status) {
 			const char *dp = (link.link_duplex ==
-				ETH_LINK_FULL_DUPLEX) ?
+				RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex";
 			printf("\nPort %u Link Up - speed %s - %s\n",
 				port_id,
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 6352a715c0d9..3f41d8e5965d 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -115,17 +115,17 @@ static struct rte_mempool *mbuf_pool;
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -149,9 +149,9 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
@@ -241,9 +241,9 @@ bond_port_init(struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			BOND_PORT, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
 	if (retval != 0)
 		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 8c4a8feec0c2..c681e237ea46 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -80,15 +80,15 @@ struct app_stats prev_app_stats;
 
 static const struct rte_eth_conf port_conf_default = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		}
 	},
 };
@@ -126,9 +126,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 1bc675962bf3..cdd9e9b60bd8 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -98,7 +98,7 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
 	int ret;
 
 	memset(&cfg_port, 0, sizeof(cfg_port));
-	cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
+	cfg_port.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
 
 	for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
 		struct app_port *ptr_port = &app_cfg->ports[idx_port];
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 413251630709..e7cdf8d5775b 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -233,13 +233,13 @@ rte_ethtool_get_pauseparam(uint16_t port_id,
 	pause_param->tx_pause = 0;
 	pause_param->rx_pause = 0;
 	switch (fc_conf.mode) {
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		pause_param->rx_pause = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		pause_param->tx_pause = 1;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_param->rx_pause = 1;
 		pause_param->tx_pause = 1;
 	default:
@@ -277,14 +277,14 @@ rte_ethtool_set_pauseparam(uint16_t port_id,
 
 	if (pause_param->tx_pause) {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_FULL;
+			fc_conf.mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf.mode = RTE_FC_TX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
 	} else {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_RX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_RX_PAUSE;
 		else
-			fc_conf.mode = RTE_FC_NONE;
+			fc_conf.mode = RTE_ETH_FC_NONE;
 	}
 
 	status = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
@@ -398,12 +398,12 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
 	for (vf = 0; vf < num_vfs; vf++) {
 #ifdef RTE_NET_IXGBE
 		rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
-			ETH_VMDQ_ACCEPT_UNTAG, 0);
+			RTE_ETH_VMDQ_ACCEPT_UNTAG, 0);
 #endif
 	}
 
 	/* Enable Rx VLAN filter; a VF's unsupported status is discarded */
-	ret = rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
+	ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
 	if (ret != 0)
 		return ret;
 
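The pause-parameter conversion above is a two-bit mapping; as a hedged
sketch, a hypothetical helper (not in this patch) expressing the same
logic as rte_ethtool_set_pauseparam():

	/* (tx_pause, rx_pause) -> RTE_ETH_FC_{NONE,RX_PAUSE,TX_PAUSE,FULL} */
	static enum rte_eth_fc_mode
	pause_to_fc_mode(int tx_pause, int rx_pause)
	{
		if (tx_pause)
			return rx_pause ? RTE_ETH_FC_FULL : RTE_ETH_FC_TX_PAUSE;
		return rx_pause ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
	}
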
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index e26be8edf28f..193a16463449 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -283,13 +283,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -311,12 +311,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 476b147bdfcc..1b841d46ad93 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -614,13 +614,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -642,9 +642,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
 
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index 8a43f6ac0f92..6185b340600c 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -212,9 +212,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index dd8a33d036ee..bfc1949c8428 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -113,7 +113,7 @@ assert_link_status(void)
 	memset(&link, 0, sizeof(link));
 	do {
 		link_get_err = rte_eth_link_get(port_id, &link);
-		if (link_get_err == 0 && link.link_status == ETH_LINK_UP)
+		if (link_get_err == 0 && link.link_status == RTE_ETH_LINK_UP)
 			break;
 		rte_delay_ms(CHECK_INTERVAL);
 	} while (--rep_cnt);
@@ -121,7 +121,7 @@ assert_link_status(void)
 	if (link_get_err < 0)
 		rte_exit(EXIT_FAILURE, ":: error: link get is failing: %s\n",
 			 rte_strerror(-link_get_err));
-	if (link.link_status == ETH_LINK_DOWN)
+	if (link.link_status == RTE_ETH_LINK_DOWN)
 		rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
 }
 
@@ -138,12 +138,12 @@ init_port(void)
 		},
 		.txmode = {
 			.offloads =
-				DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO,
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO,
 		},
 	};
 	struct rte_eth_txconf txq_conf;
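
The assert_link_status() change above uses the renamed link-state values;
a minimal wait-for-link sketch under assumed constants (90 retries of
100 ms, both hypothetical):

	struct rte_eth_link link;
	int rep = 90;

	do {
		if (rte_eth_link_get(port_id, &link) == 0 &&
				link.link_status == RTE_ETH_LINK_UP)
			break;
		rte_delay_ms(100);
	} while (--rep);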
diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index ccfee585f850..b1aa2767a0af 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -819,12 +819,12 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	/* Configuring port to use RSS for multiple RX queues. 8< */
 	static const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_PROTO_MASK,
+				.rss_hf = RTE_ETH_RSS_PROTO_MASK,
 			}
 		}
 	};
@@ -852,9 +852,9 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device:"
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 8644454a9aef..0307709f2b4a 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -149,13 +149,13 @@ static struct rte_eth_conf port_conf = {
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_SCATTER),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_SCATTER),
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -624,7 +624,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
index 9ba02e687adb..0290767af473 100644
--- a/examples/ip_pipeline/link.c
+++ b/examples/ip_pipeline/link.c
@@ -45,7 +45,7 @@ link_next(struct link *link)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -57,12 +57,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -77,11 +77,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -139,7 +139,7 @@ link_create(const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -157,9 +157,9 @@ link_create(const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -267,5 +267,5 @@ link_is_up(const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
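
For the RETA indexing above, a worked example (values per ethdev:
RTE_ETH_RSS_RETA_SIZE_512 is 512 and RTE_ETH_RETA_GROUP_SIZE is 64, so
RETA_CONF_SIZE is 8):

	/* entry i = 130 with rss->n_queues = 4:
	 *   reta_id  = 130 / RTE_ETH_RETA_GROUP_SIZE = 2
	 *   reta_pos = 130 % RTE_ETH_RETA_GROUP_SIZE = 2
	 *   queue    = 130 % 4 = 2
	 * so reta_conf[2].reta[2] = rss->queue_id[2].
	 */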
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 4f0e12e62447..a9f9bd477007 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -161,22 +161,22 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -738,7 +738,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1096,9 +1096,9 @@ main(int argc, char **argv)
 		n_tx_queue = nb_lcores;
 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5f5ec260f315..feddd84d1551 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -234,19 +234,19 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1455,10 +1455,10 @@ print_usage(const char *prgname)
 		"               \"parallel\" : Parallel\n"
 		"  --" CMD_LINE_OPT_RX_OFFLOAD
 		": bitmask of the RX HW offload capabilities to enable/use\n"
-		"                         (DEV_RX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_TX_OFFLOAD
 		": bitmask of the TX HW offload capabilities to enable/use\n"
-		"                         (DEV_TX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
 		": max number of entries in reassemble(fragment) table\n"
 		"    (zero (default value) disables reassembly)\n"
@@ -1909,7 +1909,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2212,8 +2212,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 	local_port_conf.rxmode.mtu = mtu_size;
 
 	if (multi_seg_required()) {
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	local_port_conf.rxmode.offloads |= req_rx_offloads;
@@ -2236,12 +2236,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 			portid, local_port_conf.txmode.offloads,
 			dev_info.tx_offload_capa);
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	printf("port %u configuring rx_offloads=0x%" PRIx64
 		", tx_offloads=0x%" PRIx64 "\n",
@@ -2299,7 +2299,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 		/* Pre-populate pkt offloads based on capabilities */
 		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
 		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
-		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
 
 		tx_queueid++;
@@ -2660,7 +2660,7 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
 	struct rte_flow *flow;
 	int ret;
 
-	if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 17a28556c971..5cdd794f017f 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -986,7 +986,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	if (inbound) {
 		if ((dev_info.rx_offload_capa &
-				DEV_RX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware RX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -994,7 +994,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	} else { /* outbound */
 		if ((dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware TX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -1628,7 +1628,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 	}
 
 	/* Check for outbound rules that use offloads and use this port */
@@ -1639,7 +1639,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
 	}
 	return 0;
 }
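
The same capability gate as check_eth_dev_caps() above, in a standalone
hedged sketch (portid is an illustrative local; the error handling is not
from this patch):

	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(portid, &dev_info) != 0)
		return -EINVAL;
	if ((dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY) == 0)
		return -EINVAL; /* no inline Rx IPsec offload on this port */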
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 87538dccc879..32670f80bc2b 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -115,8 +115,8 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	},
 };
 
@@ -620,7 +620,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 1790ec024072..f780be712ec0 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -95,7 +95,7 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
 /* Options for configuring ethernet port */
 static struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -608,9 +608,9 @@ init_port(uint16_t port)
 			"Error during getting device (port %u) info: %s\n",
 			port, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
@@ -688,7 +688,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index c646f1748ca7..42c04abbbb34 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -216,11 +216,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1808,7 +1808,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2632,9 +2632,9 @@ initialize_ports(struct l2fwd_crypto_options *options)
 			return retval;
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (retval < 0) {
 			printf("Cannot configure device: err=%d, port=%u\n",
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index 9040be5ed9b6..cf3d1b8aaf40 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -14,7 +14,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 			.split_hdr_size = 0,
 		},
 		.txmode = {
-			.mq_mode = ETH_MQ_TX_NONE,
+			.mq_mode = RTE_ETH_MQ_TX_NONE,
 		},
 	};
 	uint16_t nb_ports_available = 0;
@@ -22,9 +22,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 	int ret;
 
 	if (rsrc->event_mode) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-		port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+		port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	}
 
 	/* Initialise each port */
@@ -60,9 +60,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queue. 8< */
 		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 1db89f2bd139..9806204b81d1 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -395,7 +395,7 @@ check_all_ports_link_status(struct l2fwd_resources *rsrc,
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 06280321b1f2..092ea0189c7f 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -94,7 +94,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -726,7 +726,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -869,9 +869,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 07271affb4a9..78e43f9c091e 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -478,7 +478,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -650,9 +650,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE,
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index f3deeba0a665..3edabd1dd19b 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -95,7 +95,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -606,7 +606,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -792,9 +792,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the number of queues for a port. */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 1890c88a5b01..fea414ae5929 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -124,19 +124,19 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1936,7 +1936,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2004,7 +2004,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -2088,9 +2088,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 05385807e83e..7f00c65609ed 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,17 +111,17 @@ static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -607,7 +607,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* Clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -731,7 +731,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -828,9 +828,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
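
Worked numbers for config_port_max_pkt_len() above, assuming
max_pkt_len = 9000 and overhead_len = RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN = 14 + 4 = 18:

	/* conf->rxmode.mtu = 9000 - 18 = 8982; 8982 > RTE_ETHER_MTU (1500),
	 * so RTE_ETH_TX_OFFLOAD_MULTI_SEGS gets enabled as above.
	 */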
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 6aa1b66ecfcc..5a4359a368b5 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -250,18 +250,18 @@ uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_UDP,
+			.rss_hf = RTE_ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	}
 };
 
@@ -2197,7 +2197,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2510,7 +2510,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -2638,9 +2638,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 961860ea18ef..7c7613a83aad 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -75,9 +75,9 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
 			rte_panic("Error during getting device (port %u) info:"
 				  "%s\n", port_id, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+						RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 						dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index f27c76bb7a73..51cbf81f1afa 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -120,18 +120,18 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -903,7 +903,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -988,7 +988,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1053,15 +1053,15 @@ l3fwd_poll_resource_setup(void)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
 
 		if (dev_info.max_rx_queues == 1)
-			local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 
 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index e4542df11f87..8714acddd110 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.intr_conf = {
 		.lsc = 1, /**< lsc interrupt feature enabled */
@@ -147,7 +147,7 @@ print_stats(void)
 			   link_get_err < 0 ? "0" :
 			   rte_eth_link_speed_to_str(link.link_speed),
 			   link_get_err < 0 ? "Link get failed" :
-			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+			   (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex"),
 			   port_statistics[portid].tx,
 			   port_statistics[portid].rx,
@@ -507,7 +507,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -634,9 +634,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 1ad71ca7ec5f..23307073c904 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -94,7 +94,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS
+			.mq_mode = RTE_ETH_MQ_RX_RSS
 		}
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_clients;
@@ -213,7 +213,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 01dc3acf34d5..85955375f1bf 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -176,18 +176,18 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 {
 	struct rte_eth_conf port_conf = {
 			.rxmode = {
-				.mq_mode	= ETH_MQ_RX_RSS,
+				.mq_mode	= RTE_ETH_MQ_RX_RSS,
 				.split_hdr_size = 0,
-				.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+				.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 			},
 			.rx_adv_conf = {
 				.rss_conf = {
 					.rss_key = NULL,
-					.rss_hf = ETH_RSS_IP,
+					.rss_hf = RTE_ETH_RSS_IP,
 				},
 			},
 			.txmode = {
-				.mq_mode = ETH_MQ_TX_NONE,
+				.mq_mode = RTE_ETH_MQ_TX_NONE,
 			}
 	};
 	const uint16_t rx_rings = num_queues, tx_rings = num_queues;
@@ -218,9 +218,9 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 
 	info.default_rxconf.rx_drop_en = 1;
 
-	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
@@ -392,7 +392,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
index e9a388710647..f110fc129f55 100644
--- a/examples/ntb/ntb_fwd.c
+++ b/examples/ntb/ntb_fwd.c
@@ -89,17 +89,17 @@ static uint16_t pkt_burst = NTB_DFLT_PKT_BURST;
 
 static struct rte_eth_conf eth_port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 4f6982bc1289..b01ac60fd196 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -294,9 +294,9 @@ configure_eth_port(uint16_t port_id)
 		return ret;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
 	if (ret != 0)
 		return ret;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 5de5df997ee9..baeee9298d57 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -307,18 +307,18 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_TCP,
+			.rss_hf = RTE_ETH_RSS_TCP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -3441,7 +3441,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3494,7 +3494,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -3593,9 +3593,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 4f20dfc4be06..569207a79d62 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -133,7 +133,7 @@ mempool_find(struct obj *obj, const char *name)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -145,12 +145,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -165,11 +165,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -227,7 +227,7 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -245,9 +245,9 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -356,7 +356,7 @@ link_is_up(struct obj *obj, const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
 
 struct link *
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 229a277032cb..979d9eb9e9d0 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -193,14 +193,14 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Force full Tx path in the driver, required for IEEE1588 */
-	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index c32d2e12e633..743bae2da50a 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -51,18 +51,18 @@ static struct rte_mempool *pool = NULL;
  ***/
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -332,8 +332,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_rx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
@@ -378,8 +378,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_tx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 1367569c65db..9b34e4a76b1b 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -60,7 +60,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -105,9 +105,9 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE,
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 6845c396b8d9..1903d8b095a1 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -141,17 +141,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (hw_timestamping) {
-		if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
+		if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 			printf("\nERROR: Port %u does not support hardware timestamping\n"
 					, port);
 			return -1;
 		}
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
 		if (hwts_dynfield_offset < 0) {
 			printf("ERROR: Failed to register timestamp field\n");
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index 9ebd88bac20e..074fee5b26b2 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -96,7 +96,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_nodes;
@@ -115,9 +115,9 @@ init_port(uint16_t port_num)
 	if (retval != 0)
 		return retval;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/*
 	 * Standard DPDK port initialisation - config port, then set up
@@ -277,7 +277,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index fd7207aee758..16435ee3ccc2 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -49,9 +49,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
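
The recurring Tx fast-free gate in these examples, collected into one
self-contained sketch (portid and port_conf are illustrative locals):

	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(portid, &dev_info);

	if (ret != 0)
		return ret;
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;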
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 999809e6ed41..49c134a3042f 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -110,23 +110,23 @@ static int nb_sockets;
 /* empty vmdq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 		/*
 		 * VLAN stripping is necessary for 1G NICs such as I350;
 		 * this fixes a bug where IPv4 forwarding in the guest cannot
 		 * forward packets from one virtio dev to another virtio dev.
 		 */
-		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
+		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM |
-			     DEV_TX_OFFLOAD_VLAN_INSERT |
-			     DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_TCP_TSO),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO),
 	},
 	.rx_adv_conf = {
 		/*
@@ -134,7 +134,7 @@ static struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -291,9 +291,9 @@ port_init(uint16_t port)
 		return -1;
 
 	rx_rings = (uint16_t)dev_info.max_rx_queues;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0) {
@@ -557,8 +557,8 @@ us_vhost_parse_args(int argc, char **argv)
 		case 'P':
 			promiscuous = 1;
 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
-				ETH_VMDQ_ACCEPT_BROADCAST |
-				ETH_VMDQ_ACCEPT_MULTICAST;
+				RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+				RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 			break;
 
 		case OPT_VM2VM_NUM:
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index e19d79a40802..b159291d77ce 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -73,9 +73,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -270,7 +270,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 		       /* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index ee7f4324e141..1f336082e5c1 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -66,12 +66,12 @@ static uint8_t rss_enable;
 /* empty vmdq configuration structure. Filled in programmatically */
 static const struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		/*
@@ -79,7 +79,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -157,11 +157,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
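
A note for application writers following this example: not every device
supports the full RTE_ETH_RSS_* set, so the requested hash fields are
usually masked against what the driver reports, as the vmdq_dcb example
below also does. A minimal sketch (error handling omitted, untested):

	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port, &dev_info);
	eth_conf->rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
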
@@ -259,9 +259,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
 	if (retval != 0)
 		return retval;
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 14c20e6a8b26..1a19f1799bd2 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -60,8 +60,8 @@ static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports;
 
 /* number of pools (if user does not specify any, 32 by default */
-static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
-static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
+static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
+static enum rte_eth_nb_tcs   num_tcs   = RTE_ETH_4_TCS;
 static uint16_t num_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
 static uint8_t rss_enable;
@@ -69,11 +69,11 @@ static uint8_t rss_enable;
 /* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
 static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_DCB,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
+		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
 	},
 	/*
 	 * should be overridden separately in code with
@@ -81,7 +81,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	 */
 	.rx_adv_conf = {
 		.vmdq_dcb_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -89,12 +89,12 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 			.dcb_tc = {0},
 		},
 		.dcb_rx_conf = {
-				.nb_tcs = ETH_4_TCS,
+				.nb_tcs = RTE_ETH_4_TCS,
 				/** Traffic class each UP mapped to. */
 				.dcb_tc = {0},
 		},
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -103,7 +103,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	},
 	.tx_adv_conf = {
 		.vmdq_dcb_tx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.dcb_tc = {0},
 		},
 	},
@@ -157,7 +157,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 		conf.pool_map[i].pools = 1UL << i;
 		vmdq_conf.pool_map[i].pools = 1UL << i;
 	}
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		conf.dcb_tc[i] = i % num_tcs;
 		dcb_conf.dcb_tc[i] = i % num_tcs;
 		tx_conf.dcb_tc[i] = i % num_tcs;
@@ -173,11 +173,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
 			  sizeof(tx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -271,9 +271,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
@@ -382,9 +382,9 @@ vmdq_parse_num_pools(const char *q_arg)
 	if (n != 16 && n != 32)
 		return -1;
 	if (n == 16)
-		num_pools = ETH_16_POOLS;
+		num_pools = RTE_ETH_16_POOLS;
 	else
-		num_pools = ETH_32_POOLS;
+		num_pools = RTE_ETH_32_POOLS;
 
 	return 0;
 }
@@ -404,9 +404,9 @@ vmdq_parse_num_tcs(const char *q_arg)
 	if (n != 4 && n != 8)
 		return -1;
 	if (n == 4)
-		num_tcs = ETH_4_TCS;
+		num_tcs = RTE_ETH_4_TCS;
 	else
-		num_tcs = ETH_8_TCS;
+		num_tcs = RTE_ETH_8_TCS;
 
 	return 0;
 }
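
Out-of-tree applications that must keep building against pre-rename DPDK
releases can carry a small shim of their own until they require v21.11;
a sketch (illustrative; extend with whichever names the application
actually uses):

	/* Map the new names onto the old ones on older ethdev versions. */
	#ifndef RTE_ETH_MQ_TX_NONE
	#define RTE_ETH_MQ_TX_NONE	ETH_MQ_TX_NONE
	#define RTE_ETH_32_POOLS	ETH_32_POOLS
	#define RTE_ETH_4_TCS		ETH_4_TCS
	#endif
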
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 0174ba03d7f3..c134b878684e 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -116,7 +116,7 @@ struct rte_eth_dev_data {
 			/**< Device Ethernet link address.
 			 *   @see rte_eth_dev_release_port()
 			 */
-	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+	uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
 			/**< Bitmap associating MAC addresses to pools. */
 	struct rte_ether_addr *hash_mac_addrs;
 			/**< Device Ethernet MAC addresses of hash filtering.
@@ -1657,23 +1657,23 @@ struct rte_eth_syn_filter {
 /**
  * filter type of tunneling packet
  */
-#define ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
-#define ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP Addr */
-#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
-#define ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
-#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
-#define ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
-
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN)
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
-					ETH_TUNNEL_FILTER_TENID | \
-					ETH_TUNNEL_FILTER_IMAC)
+#define RTE_ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP addr */
+#define RTE_ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
+#define RTE_ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
+#define RTE_ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
+
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_IVLAN)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+						RTE_ETH_TUNNEL_FILTER_IVLAN | \
+						RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC (RTE_ETH_TUNNEL_FILTER_OMAC | \
+					       RTE_ETH_TUNNEL_FILTER_TENID | \
+					       RTE_ETH_TUNNEL_FILTER_IMAC)
 
 /**
  *  Select IPv4 or IPv6 for tunnel filters.
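
The composite filter macros stay plain ORs of the individual bits, so
applications can also build their own combinations; for instance
(illustrative only):

	uint16_t filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
			       RTE_ETH_TUNNEL_FILTER_TENID;
	/* Same value as RTE_ETH_TUNNEL_FILTER_IMAC_TENID. */
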
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 3b8ef9ef22e7..49ff506851cf 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -101,9 +101,6 @@ static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
 
 static const struct {
@@ -128,14 +125,14 @@ static const struct {
 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
-	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
 };
 
 #undef RTE_RX_OFFLOAD_BIT2STR
 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
 
 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_TX_OFFLOAD_##_name, #_name }
+	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
 
 static const struct {
 	uint64_t offload;
@@ -1173,32 +1170,32 @@ uint32_t
 rte_eth_speed_bitflag(uint32_t speed, int duplex)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
-		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
-	case ETH_SPEED_NUM_100M:
-		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
-	case ETH_SPEED_NUM_1G:
-		return ETH_LINK_SPEED_1G;
-	case ETH_SPEED_NUM_2_5G:
-		return ETH_LINK_SPEED_2_5G;
-	case ETH_SPEED_NUM_5G:
-		return ETH_LINK_SPEED_5G;
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10M:
+		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+	case RTE_ETH_SPEED_NUM_100M:
+		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+	case RTE_ETH_SPEED_NUM_1G:
+		return RTE_ETH_LINK_SPEED_1G;
+	case RTE_ETH_SPEED_NUM_2_5G:
+		return RTE_ETH_LINK_SPEED_2_5G;
+	case RTE_ETH_SPEED_NUM_5G:
+		return RTE_ETH_LINK_SPEED_5G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
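
Callers of this helper are unaffected beyond the constant prefix; a
sketch of typical use:

	/* Numeric full-duplex 10G speed -> capability bitflag. */
	uint32_t flag = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G, 1);
	/* flag == RTE_ETH_LINK_SPEED_10G */
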
@@ -1503,7 +1500,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t max_rx_pktlen;
 		uint32_t overhead_len;
 
@@ -1560,12 +1557,12 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
-	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
-	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
 			port_id,
-			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -2174,7 +2171,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * size is supported by the configured device.
 	 */
 	/* Get the real Ethernet overhead length */
-	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t overhead_len;
 		uint32_t max_rx_pktlen;
 		int ret;
@@ -2754,21 +2751,21 @@ const char *
 rte_eth_link_speed_to_str(uint32_t link_speed)
 {
 	switch (link_speed) {
-	case ETH_SPEED_NUM_NONE: return "None";
-	case ETH_SPEED_NUM_10M:  return "10 Mbps";
-	case ETH_SPEED_NUM_100M: return "100 Mbps";
-	case ETH_SPEED_NUM_1G:   return "1 Gbps";
-	case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
-	case ETH_SPEED_NUM_5G:   return "5 Gbps";
-	case ETH_SPEED_NUM_10G:  return "10 Gbps";
-	case ETH_SPEED_NUM_20G:  return "20 Gbps";
-	case ETH_SPEED_NUM_25G:  return "25 Gbps";
-	case ETH_SPEED_NUM_40G:  return "40 Gbps";
-	case ETH_SPEED_NUM_50G:  return "50 Gbps";
-	case ETH_SPEED_NUM_56G:  return "56 Gbps";
-	case ETH_SPEED_NUM_100G: return "100 Gbps";
-	case ETH_SPEED_NUM_200G: return "200 Gbps";
-	case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+	case RTE_ETH_SPEED_NUM_NONE: return "None";
+	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
+	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
+	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
+	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
+	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
+	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
+	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
+	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
+	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
+	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
 	default: return "Invalid";
 	}
 }
@@ -2792,14 +2789,14 @@ rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
 		return -EINVAL;
 	}
 
-	if (eth_link->link_status == ETH_LINK_DOWN)
+	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
 		return snprintf(str, len, "Link down");
 	else
 		return snprintf(str, len, "Link up at %s %s %s",
 			rte_eth_link_speed_to_str(eth_link->link_speed),
-			(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			"FDX" : "HDX",
-			(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 			"Autoneg" : "Fixed");
 }
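
A typical caller is likewise unchanged apart from the macro names
(sketch; port_id assumed valid, return values unchecked):

	char link_str[RTE_ETH_LINK_MAX_STR_LEN];
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	rte_eth_link_to_str(link_str, sizeof(link_str), &link);
	printf("%s\n", link_str);
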
 
@@ -3706,7 +3703,7 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
 	dev = &rte_eth_devices[port_id];
 
 	if (!(dev->data->dev_conf.rxmode.offloads &
-	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
 			port_id);
 		return -ENOSYS;
@@ -3793,44 +3790,44 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	dev_offloads = orig_offloads;
 
 	/* check which option changed by application */
-	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
-		mask |= ETH_VLAN_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		mask |= RTE_ETH_VLAN_STRIP_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
-		mask |= ETH_VLAN_FILTER_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+		mask |= RTE_ETH_VLAN_FILTER_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
-		mask |= ETH_VLAN_EXTEND_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+		mask |= RTE_ETH_VLAN_EXTEND_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
-		mask |= ETH_QINQ_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+		mask |= RTE_ETH_QINQ_STRIP_MASK;
 	}
 
 	/*no change*/
@@ -3875,17 +3872,17 @@ rte_eth_dev_get_vlan_offload(uint16_t port_id)
 	dev = &rte_eth_devices[port_id];
 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		ret |= ETH_VLAN_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		ret |= ETH_VLAN_FILTER_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-		ret |= ETH_VLAN_EXTEND_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
-		ret |= ETH_QINQ_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
 
 	return ret;
 }
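
The get/set pair composes exactly as before, only the mask names change;
e.g. enabling VLAN stripping at runtime (sketch, return values
unchecked):

	int mask = rte_eth_dev_get_vlan_offload(port_id);

	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	rte_eth_dev_set_vlan_offload(port_id, mask);
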
@@ -3962,7 +3959,7 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
 		return -EINVAL;
 	}
@@ -3980,7 +3977,7 @@ eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
 {
 	uint16_t i, num;
 
-	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
+	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
 	for (i = 0; i < num; i++) {
 		if (reta_conf[i].mask)
 			return 0;
@@ -4002,8 +3999,8 @@ eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) &&
 			(reta_conf[idx].reta[shift] >= max_rxq)) {
 			RTE_ETHDEV_LOG(ERR,
@@ -4159,7 +4156,7 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4185,7 +4182,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4326,8 +4323,8 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 			port_id);
 		return -EINVAL;
 	}
-	if (pool >= ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
+	if (pool >= RTE_ETH_64_POOLS) {
+		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
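
For reference, the pool argument checked above comes from callers such
as the following sketch (the MAC address value is illustrative):

	struct rte_ether_addr mac = { .addr_bytes = {0x02, 0, 0, 0, 0, 1} };

	/* pool must be below RTE_ETH_64_POOLS */
	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
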
 
@@ -6236,7 +6233,7 @@ eth_dev_handle_port_link_status(const char *cmd __rte_unused,
 	rte_tel_data_add_dict_string(d, status_str, "UP");
 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
 	rte_tel_data_add_dict_string(d, "duplex",
-			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex");
 	return 0;
 }
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 69766eaae2d4..5f9fe0f55953 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -249,7 +249,7 @@ void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
  * field is not supported, its value is 0.
  * All byte-related statistics do not include Ethernet FCS regardless
  * of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
  */
 struct rte_eth_stats {
 	uint64_t ipackets;  /**< Total number of successfully received packets. */
@@ -279,43 +279,75 @@ struct rte_eth_stats {
 /**@{@name Link speed capabilities
  * Device supported speeds bitmap flags
  */
-#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
-#define ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
-#define ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
-#define ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
-#define ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
-#define ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
-#define ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
-#define ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
-#define ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG	RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED	RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD	RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M	RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD	RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M	RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
+#define ETH_LINK_SPEED_1G	RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G	RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
+#define ETH_LINK_SPEED_5G	RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
+#define ETH_LINK_SPEED_10G	RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
+#define ETH_LINK_SPEED_20G	RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
+#define ETH_LINK_SPEED_25G	RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
+#define ETH_LINK_SPEED_40G	RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
+#define ETH_LINK_SPEED_50G	RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
+#define ETH_LINK_SPEED_56G	RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G	RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G	RTE_ETH_LINK_SPEED_200G
 /**@}*/
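
These flags feed rte_eth_conf.link_speeds; requesting a fixed 10G link,
for example, looks like (sketch):

	struct rte_eth_conf conf = {0};

	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			   RTE_ETH_LINK_SPEED_10G;
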
 
 /**@{@name Link speed
  * Ethernet numeric link speeds in Mbps
  */
-#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
-#define ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
-#define ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
-#define ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
-#define ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
-#define ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
-#define ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
-#define ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
-#define ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
-#define ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
-#define ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE	RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
+#define ETH_SPEED_NUM_10M	RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M	RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
+#define ETH_SPEED_NUM_1G	RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G	RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
+#define ETH_SPEED_NUM_5G	RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
+#define ETH_SPEED_NUM_10G	RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
+#define ETH_SPEED_NUM_20G	RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
+#define ETH_SPEED_NUM_25G	RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
+#define ETH_SPEED_NUM_40G	RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
+#define ETH_SPEED_NUM_50G	RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
+#define ETH_SPEED_NUM_56G	RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G	RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G	RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN	RTE_ETH_SPEED_NUM_UNKNOWN
 /**@}*/
 
 /**
@@ -323,21 +355,27 @@ struct rte_eth_stats {
  */
 __extension__
 struct rte_eth_link {
-	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
-	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
-	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;        /**< RTE_ETH_SPEED_NUM_ */
+	uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
 
 /**@{@name Link negotiation
  * Constants used in link management.
  */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX	RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX	RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN		RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP		RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED		RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG	RTE_ETH_LINK_AUTONEG
 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
 /**@}*/
 
@@ -354,9 +392,12 @@ struct rte_eth_thresh {
 /**@{@name Multi-queue mode
  * @see rte_eth_conf.rxmode.mq_mode.
  */
-#define ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
-#define ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
-#define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define RTE_ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
+#define ETH_MQ_RX_RSS_FLAG	RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
+#define ETH_MQ_RX_DCB_FLAG	RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define ETH_MQ_RX_VMDQ_FLAG	RTE_ETH_MQ_RX_VMDQ_FLAG
 /**@}*/
 
 /**
@@ -365,50 +406,49 @@ struct rte_eth_thresh {
  */
 enum rte_eth_rx_mq_mode {
 	/** None of DCB,RSS or VMDQ mode */
-	ETH_MQ_RX_NONE = 0,
+	RTE_ETH_MQ_RX_NONE = 0,
 
 	/** For RX side, only RSS is on */
-	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
 	/** For RX side,only DCB is on. */
-	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Both DCB and RSS enable */
-	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 
 	/** Only VMDQ, no RSS nor DCB */
-	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** RSS mode with VMDQ */
-	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** Use VMDQ+DCB to route traffic to queues */
-	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Enable both VMDQ and DCB in VMDq */
-	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
-				 ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+				     RTE_ETH_MQ_RX_VMDQ_FLAG,
 };
 
-/**
- * for rx mq mode backward compatible
- */
-#define ETH_RSS                       ETH_MQ_RX_RSS
-#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX                    ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE		RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS		RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB		RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS	RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY	RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS	RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB	RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS	RTE_ETH_MQ_RX_VMDQ_DCB_RSS
 
 /**
  * A set of values to identify what method is to be used to transmit
  * packets using multi-TCs.
  */
 enum rte_eth_tx_mq_mode {
-	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
-	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
-	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
+	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
+	RTE_ETH_MQ_TX_DCB,          /**< For TX side, only DCB is on. */
+	RTE_ETH_MQ_TX_VMDQ_DCB,     /**< For TX side, both DCB and VT are on. */
+	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
-
-/**
- * for tx mq mode backward compatible
- */
-#define ETH_DCB_NONE                ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX                  ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE		RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB		RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB	RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY	RTE_ETH_MQ_TX_VMDQ_ONLY
 
 /**
  * A structure used to configure the RX features of an Ethernet port.
@@ -421,7 +461,7 @@ struct rte_eth_rxmode {
 	uint32_t max_lro_pkt_size;
 	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
 	/**
-	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -436,12 +476,17 @@ struct rte_eth_rxmode {
  * Note that single VLAN is treated the same as inner VLAN.
  */
 enum rte_vlan_type {
-	ETH_VLAN_TYPE_UNKNOWN = 0,
-	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
-	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
-	ETH_VLAN_TYPE_MAX,
+	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+	RTE_ETH_VLAN_TYPE_MAX,
 };
 
+#define ETH_VLAN_TYPE_UNKNOWN	RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER	RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER	RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX	RTE_ETH_VLAN_TYPE_MAX
+
 /**
  * A structure used to describe a vlan filter.
  * If the bit corresponding to a VID is set, such VID is on.
@@ -512,74 +557,113 @@ struct rte_eth_rss_conf {
  * Below macros are defined for RSS offload types, they can be used to
  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
  */
-#define ETH_RSS_IPV4               (1ULL << 2)
-#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
-#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
-#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
-#define ETH_RSS_IPV6               (1ULL << 8)
-#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
-#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
-#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
-#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
-#define ETH_RSS_IPV6_EX            (1ULL << 15)
-#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
-#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
-#define ETH_RSS_PORT               (1ULL << 18)
-#define ETH_RSS_VXLAN              (1ULL << 19)
-#define ETH_RSS_GENEVE             (1ULL << 20)
-#define ETH_RSS_NVGRE              (1ULL << 21)
-#define ETH_RSS_GTPU               (1ULL << 23)
-#define ETH_RSS_ETH                (1ULL << 24)
-#define ETH_RSS_S_VLAN             (1ULL << 25)
-#define ETH_RSS_C_VLAN             (1ULL << 26)
-#define ETH_RSS_ESP                (1ULL << 27)
-#define ETH_RSS_AH                 (1ULL << 28)
-#define ETH_RSS_L2TPV3             (1ULL << 29)
-#define ETH_RSS_PFCP               (1ULL << 30)
-#define ETH_RSS_PPPOE		   (1ULL << 31)
-#define ETH_RSS_ECPRI		   (1ULL << 32)
-#define ETH_RSS_MPLS		   (1ULL << 33)
-#define ETH_RSS_IPV4_CHKSUM	   (1ULL << 34)
-
-/**
- * The ETH_RSS_L4_CHKSUM works on checksum field of any L4 header.
- * It is similar to ETH_RSS_PORT that they don't specify the specific type of
+#define RTE_ETH_RSS_IPV4               (1ULL << 2)
+#define ETH_RSS_IPV4		RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4          (1ULL << 3)
+#define ETH_RSS_FRAG_IPV4	RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
+#define ETH_RSS_NONFRAG_IPV4_TCP	RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
+#define ETH_RSS_NONFRAG_IPV4_UDP	RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP	RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER	RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6               (1ULL << 8)
+#define ETH_RSS_IPV6		RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6          (1ULL << 9)
+#define ETH_RSS_FRAG_IPV6	RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
+#define ETH_RSS_NONFRAG_IPV6_TCP	RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
+#define ETH_RSS_NONFRAG_IPV6_UDP	RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP	RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER	RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD         (1ULL << 14)
+#define ETH_RSS_L2_PAYLOAD	RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX            (1ULL << 15)
+#define ETH_RSS_IPV6_EX		RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
+#define ETH_RSS_IPV6_TCP_EX	RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
+#define ETH_RSS_IPV6_UDP_EX	RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT               (1ULL << 18)
+#define ETH_RSS_PORT		RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN              (1ULL << 19)
+#define ETH_RSS_VXLAN		RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE             (1ULL << 20)
+#define ETH_RSS_GENEVE		RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE              (1ULL << 21)
+#define ETH_RSS_NVGRE		RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU               (1ULL << 23)
+#define ETH_RSS_GTPU		RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH                (1ULL << 24)
+#define ETH_RSS_ETH		RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN             (1ULL << 25)
+#define ETH_RSS_S_VLAN		RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN             (1ULL << 26)
+#define ETH_RSS_C_VLAN		RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP                (1ULL << 27)
+#define ETH_RSS_ESP		RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH                 (1ULL << 28)
+#define ETH_RSS_AH		RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3             (1ULL << 29)
+#define ETH_RSS_L2TPV3		RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP               (1ULL << 30)
+#define ETH_RSS_PFCP		RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE              (1ULL << 31)
+#define ETH_RSS_PPPOE		RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI              (1ULL << 32)
+#define ETH_RSS_ECPRI		RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS               (1ULL << 33)
+#define ETH_RSS_MPLS		RTE_ETH_RSS_MPLS
+#define RTE_ETH_RSS_IPV4_CHKSUM        (1ULL << 34)
+#define ETH_RSS_IPV4_CHKSUM	RTE_ETH_RSS_IPV4_CHKSUM
+
+/**
+ * The RTE_ETH_RSS_L4_CHKSUM works on checksum field of any L4 header.
+ * It is similar to RTE_ETH_RSS_PORT in that it does not specify the exact type of
  * L4 header. This macro is defined to replace some specific L4 (TCP/UDP/SCTP)
  * checksum type for constructing the use of RSS offload bits.
  *
  * Due to above reason, some old APIs (and configuration) don't support
- * ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
+ * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
  *
  * For the case that checksum is not used in an UDP header,
  * it takes the reserved value 0 as input for the hash function.
  */
-#define ETH_RSS_L4_CHKSUM          (1ULL << 35)
+#define RTE_ETH_RSS_L4_CHKSUM          (1ULL << 35)
+#define ETH_RSS_L4_CHKSUM	RTE_ETH_RSS_L4_CHKSUM
 
 /*
- * We use the following macros to combine with above ETH_RSS_* for
+ * We use the following macros to combine with above RTE_ETH_RSS_* for
  * more specific input set selection. These bits are defined starting
  * from the high end of the 64 bits.
- * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
+ * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
  * the same level are used simultaneously, it is the same case as none of
  * them are added.
  */
-#define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
-#define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
-#define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
-#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
-#define ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
-#define ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define RTE_ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
+#define ETH_RSS_L3_SRC_ONLY	RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY        (1ULL << 62)
+#define ETH_RSS_L3_DST_ONLY	RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
+#define ETH_RSS_L4_SRC_ONLY	RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY        (1ULL << 60)
+#define ETH_RSS_L4_DST_ONLY	RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
+#define ETH_RSS_L2_SRC_ONLY	RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY        (1ULL << 58)
+#define ETH_RSS_L2_DST_ONLY	RTE_ETH_RSS_L2_DST_ONLY
 
 /*
  * Only select IPV6 address prefix as RSS input set according to
- * https://tools.ietf.org/html/rfc6052
- * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
- * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
+ * https://tools.ietf.org/html/rfc6052
+ * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
  */
 #define RTE_ETH_RSS_L3_PRE32	   (1ULL << 57)
 #define RTE_ETH_RSS_L3_PRE40	   (1ULL << 56)
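
The PRE* prefix bits are meant to be ORed with one of the IPv6 types,
which is exactly how the composite macros further below are built;
e.g. (illustrative):

	uint64_t rss_hf = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE64;
	/* Same value as RTE_ETH_RSS_IPV6_PRE64. */
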
@@ -601,22 +685,27 @@ struct rte_eth_rss_conf {
  * It basically stands for the innermost encapsulation level RSS
  * can be performed on according to PMD and device capabilities.
  */
-#define ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT	RTE_ETH_RSS_LEVEL_PMD_DEFAULT
 
 /**
  * level 1, requests RSS to be performed on the outermost packet
  * encapsulation level.
  */
-#define ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST	RTE_ETH_RSS_LEVEL_OUTERMOST
 
 /**
  * level 2, requests RSS to be performed on the specified inner packet
  * encapsulation level, from outermost to innermost (lower to higher values).
  */
-#define ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST	RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK	RTE_ETH_RSS_LEVEL_MASK
 
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf)	RTE_ETH_RSS_LEVEL(rss_hf)
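
Extracting the requested level back out is a single macro application;
sketch:

	uint64_t rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_LEVEL_OUTERMOST;
	uint64_t level = RTE_ETH_RSS_LEVEL(rss_hf); /* == 1, outermost */
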
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
@@ -631,219 +720,312 @@ struct rte_eth_rss_conf {
 static inline uint64_t
 rte_eth_rss_hf_refine(uint64_t rss_hf)
 {
-	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 
-	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 	return rss_hf;
 }
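
From the caller's side the refinement is a normalization step; sketch:

	uint64_t hf = RTE_ETH_RSS_IPV4 |
		      RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY;

	hf = rte_eth_rss_hf_refine(hf);
	/* Both L3 *_ONLY bits cleared: hashing on src+dst, the default. */
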
 
-#define ETH_RSS_IPV6_PRE32 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32	RTE_ETH_RSS_IPV6_PRE32
 
-#define ETH_RSS_IPV6_PRE40 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40	RTE_ETH_RSS_IPV6_PRE40
 
-#define ETH_RSS_IPV6_PRE48 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48	RTE_ETH_RSS_IPV6_PRE48
 
-#define ETH_RSS_IPV6_PRE56 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56	RTE_ETH_RSS_IPV6_PRE56
 
-#define ETH_RSS_IPV6_PRE64 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64	RTE_ETH_RSS_IPV6_PRE64
 
-#define ETH_RSS_IPV6_PRE96 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96	RTE_ETH_RSS_IPV6_PRE96
 
-#define ETH_RSS_IPV6_PRE32_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP	RTE_ETH_RSS_IPV6_PRE32_UDP
 
-#define ETH_RSS_IPV6_PRE40_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP	RTE_ETH_RSS_IPV6_PRE40_UDP
 
-#define ETH_RSS_IPV6_PRE48_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP	RTE_ETH_RSS_IPV6_PRE48_UDP
 
-#define ETH_RSS_IPV6_PRE56_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP	RTE_ETH_RSS_IPV6_PRE56_UDP
 
-#define ETH_RSS_IPV6_PRE64_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP	RTE_ETH_RSS_IPV6_PRE64_UDP
 
-#define ETH_RSS_IPV6_PRE96_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP	RTE_ETH_RSS_IPV6_PRE96_UDP
 
-#define ETH_RSS_IPV6_PRE32_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP	RTE_ETH_RSS_IPV6_PRE32_TCP
 
-#define ETH_RSS_IPV6_PRE40_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP	RTE_ETH_RSS_IPV6_PRE40_TCP
 
-#define ETH_RSS_IPV6_PRE48_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP	RTE_ETH_RSS_IPV6_PRE48_TCP
 
-#define ETH_RSS_IPV6_PRE56_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP	RTE_ETH_RSS_IPV6_PRE56_TCP
 
-#define ETH_RSS_IPV6_PRE64_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP	RTE_ETH_RSS_IPV6_PRE64_TCP
 
-#define ETH_RSS_IPV6_PRE96_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP	RTE_ETH_RSS_IPV6_PRE96_TCP
 
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP	RTE_ETH_RSS_IPV6_PRE32_SCTP
 
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP	RTE_ETH_RSS_IPV6_PRE40_SCTP
 
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP	RTE_ETH_RSS_IPV6_PRE48_SCTP
 
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP	RTE_ETH_RSS_IPV6_PRE56_SCTP
 
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP	RTE_ETH_RSS_IPV6_PRE64_SCTP
 
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
-	ETH_RSS_VXLAN  | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
-	ETH_RSS_S_VLAN  | \
-	ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP	RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP	RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP	RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP	RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP	RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+	RTE_ETH_RSS_VXLAN  | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL	RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+	RTE_ETH_RSS_S_VLAN  | \
+	RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN	RTE_ETH_RSS_VLAN
 
 /**< Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX | \
-	ETH_RSS_PORT  | \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE | \
-	ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX | \
+	RTE_ETH_RSS_PORT  | \
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE | \
+	RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK	RTE_ETH_RSS_PROTO_MASK
 
 /*
  * Definitions used for redirection table entry size.
  * Some RSS RETA sizes may not be supported by some drivers, check the
  * documentation or the description of relevant functions for more details.
  */
-#define ETH_RSS_RETA_SIZE_64  64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE   64
+#define RTE_ETH_RSS_RETA_SIZE_64  64
+#define ETH_RSS_RETA_SIZE_64	RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128	RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256	RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512	RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE   64
+#define RTE_RETA_GROUP_SIZE	RTE_ETH_RETA_GROUP_SIZE
 
 /**@{@name VMDq and DCB maximums */
-#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
-#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS	RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES	RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES	RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES	RTE_ETH_DCB_NUM_QUEUES
 /**@}*/
 
 /**@{@name DCB capabilities */
-#define ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PG_SUPPORT	RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT	RTE_ETH_DCB_PFC_SUPPORT
 /**@}*/
 
 /**@{@name VLAN offload bits */
-#define ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
-
-#define ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
-#define ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
-#define ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
-#define ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
-#define ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD	RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD	RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD	RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD	RTE_ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
+#define ETH_VLAN_STRIP_MASK	RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
+#define ETH_VLAN_FILTER_MASK	RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
+#define ETH_VLAN_EXTEND_MASK	RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
+#define ETH_QINQ_STRIP_MASK	RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define ETH_VLAN_ID_MAX		RTE_ETH_VLAN_ID_MAX
 /**@}*/
 
 /* Definitions used for receive MAC address   */
-#define ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define ETH_NUM_RECEIVE_MAC_ADDR	RTE_ETH_NUM_RECEIVE_MAC_ADDR
 
 /* Definitions used for unicast hash  */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY	RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
 
 /**@{@name VMDq Rx mode
  * @see rte_eth_vmdq_rx_conf.rx_mode
  */
-#define ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG	RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_MC	RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC	RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST	RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST	RTE_ETH_VMDQ_ACCEPT_MULTICAST
 /**@}*/
 
+/** Maximum nb. of VLANs per mirror rule */
+#define RTE_ETH_MIRROR_MAX_VLANS       64
+#define ETH_MIRROR_MAX_VLANS	RTE_ETH_MIRROR_MAX_VLANS
+
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_UP	RTE_ETH_MIRROR_VIRTUAL_POOL_UP
+#define RTE_ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
+#define ETH_MIRROR_UPLINK_PORT	RTE_ETH_MIRROR_UPLINK_PORT
+#define RTE_ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
+#define ETH_MIRROR_DOWNLINK_PORT	RTE_ETH_MIRROR_DOWNLINK_PORT
+#define RTE_ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
+#define ETH_MIRROR_VLAN		RTE_ETH_MIRROR_VLAN
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_DOWN	RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
+
+/**
+ * A structure used to configure VLAN traffic mirror of an Ethernet port.
+ */
+struct rte_eth_vlan_mirror {
+	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
+	/** VLAN ID list for vlan mirroring. */
+	uint16_t vlan_id[RTE_ETH_MIRROR_MAX_VLANS];
+};
+
+/**
+ * A structure used to configure traffic mirror of an Ethernet port.
+ */
+struct rte_eth_mirror_conf {
+	uint8_t rule_type; /**< Mirroring rule type */
+	uint8_t dst_pool;  /**< Destination pool for this mirror rule. */
+	uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
+	/** VLAN ID setting for VLAN mirroring. */
+	struct rte_eth_vlan_mirror vlan;
+};
+
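A sketch of the renamed mirror macros and structures in use (the VLAN id,
pool and mask values are illustrative; applying the rule is not shown):

	/* Sketch: describe a rule that mirrors VLAN 100 traffic to pool 1.
	 * Only the first entry of vlan_id[] is marked valid in vlan_mask. */
	struct rte_eth_mirror_conf mirror_conf = {
		.rule_type = RTE_ETH_MIRROR_VLAN,
		.dst_pool = 1,
		.vlan = {
			.vlan_mask = 1ULL << 0,
			.vlan_id = { 100 },
		},
	};
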
 /**
  * A structure used to configure 64 entries of Redirection Table of the
  * Receive Side Scaling (RSS) feature of an Ethernet port. To configure
@@ -853,7 +1035,7 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)
 struct rte_eth_rss_reta_entry64 {
 	uint64_t mask;
 	/**< Mask bits indicate which entries need to be updated/queried. */
-	uint16_t reta[RTE_RETA_GROUP_SIZE];
+	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
 	/**< Group of 64 redirection table entries. */
 };
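
For the renamed RTE_ETH_RETA_GROUP_SIZE, a small population sketch (the
128-entry table size and the queue count are assumptions, not requirements):

	/* Sketch: spread a 128-entry RETA across nb_rxq queues round-robin.
	 * A real application should use the reta_size reported in
	 * struct rte_eth_dev_info. */
	static int
	example_reta_setup(uint16_t port_id, uint16_t nb_rxq)
	{
		struct rte_eth_rss_reta_entry64 reta_conf[2] = { 0 };
		int i;

		for (i = 0; i < 128; i++) {
			reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
				1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
			reta_conf[i / RTE_ETH_RETA_GROUP_SIZE]
				.reta[i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_rxq;
		}
		return rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
	}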
 
@@ -862,38 +1044,44 @@ struct rte_eth_rss_reta_entry64 {
  * in DCB configurations
  */
 enum rte_eth_nb_tcs {
-	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
-	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
+	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
 };
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
 
 /**
  * This enum indicates the possible number of queue pools
  * in VMDQ configurations.
  */
 enum rte_eth_nb_pools {
-	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
-	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
-	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
-	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
+	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
+	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
+	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
+	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
 };
+#define ETH_8_POOLS	RTE_ETH_8_POOLS
+#define ETH_16_POOLS	RTE_ETH_16_POOLS
+#define ETH_32_POOLS	RTE_ETH_32_POOLS
+#define ETH_64_POOLS	RTE_ETH_64_POOLS
 
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_dcb_tx_conf {
 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_dcb_tx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_tx_conf {
@@ -919,8 +1107,8 @@ struct rte_eth_vmdq_dcb_conf {
 	struct {
 		uint16_t vlan_id; /**< The vlan id of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 	/**< Selects a queue in a pool */
 };
 
@@ -931,7 +1119,7 @@ struct rte_eth_vmdq_dcb_conf {
  * Using this feature, packets are routed to a pool of queues. By default,
  * the pool selection is based on the MAC address, the vlan id in the
  * vlan tag as specified in the pool_map array.
- * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * Passing RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
  * selection using only the MAC address. MAC address to pool mapping is done
  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
  * corresponding to the pool id.
@@ -952,7 +1140,7 @@ struct rte_eth_vmdq_rx_conf {
 	struct {
 		uint16_t vlan_id; /**< The vlan id of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
 };
 
 /**
@@ -961,7 +1149,7 @@ struct rte_eth_vmdq_rx_conf {
 struct rte_eth_txmode {
 	enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
 	/**
-	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -1045,7 +1233,7 @@ struct rte_eth_rxconf {
 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */
 	/**
-	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1074,7 +1262,7 @@ struct rte_eth_txconf {
 
 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	/**
-	 * Per-queue Tx offloads to be set  using DEV_TX_OFFLOAD_* flags.
+	 * Per-queue Tx offloads to be set  using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1185,12 +1373,17 @@ struct rte_eth_desc_lim {
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
-	RTE_FC_NONE = 0, /**< Disable flow control. */
-	RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
-	RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
-	RTE_FC_FULL      /**< Enable flow control on both side. */
+	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+	RTE_ETH_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
+	RTE_ETH_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
+	RTE_ETH_FC_FULL      /**< Enable flow control on both side. */
 };
 
+#define RTE_FC_NONE	RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE	RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE	RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL	RTE_ETH_FC_FULL
+
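A sketch of the renamed flow control modes with the existing API (other
fc_conf fields are kept as reported by the driver):

	/* Sketch: switch a port to full (Rx + Tx) flow control. */
	static int
	example_enable_flow_ctrl(uint16_t port_id)
	{
		struct rte_eth_fc_conf fc_conf;
		int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);

		if (ret != 0)
			return ret;
		fc_conf.mode = RTE_ETH_FC_FULL;
		return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}
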
 /**
  * A structure used to configure Ethernet flow control parameter.
  * These parameters will be configured into the register of the NIC.
@@ -1221,18 +1414,29 @@ struct rte_eth_pfc_conf {
  * @see rte_eth_udp_tunnel
  */
 enum rte_eth_tunnel_type {
-	RTE_TUNNEL_TYPE_NONE = 0,
-	RTE_TUNNEL_TYPE_VXLAN,
-	RTE_TUNNEL_TYPE_GENEVE,
-	RTE_TUNNEL_TYPE_TEREDO,
-	RTE_TUNNEL_TYPE_NVGRE,
-	RTE_TUNNEL_TYPE_IP_IN_GRE,
-	RTE_L2_TUNNEL_TYPE_E_TAG,
-	RTE_TUNNEL_TYPE_VXLAN_GPE,
-	RTE_TUNNEL_TYPE_ECPRI,
-	RTE_TUNNEL_TYPE_MAX,
+	RTE_ETH_TUNNEL_TYPE_NONE = 0,
+	RTE_ETH_TUNNEL_TYPE_VXLAN,
+	RTE_ETH_TUNNEL_TYPE_GENEVE,
+	RTE_ETH_TUNNEL_TYPE_TEREDO,
+	RTE_ETH_TUNNEL_TYPE_NVGRE,
+	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_ETH_TUNNEL_TYPE_ECPRI,
+	RTE_ETH_TUNNEL_TYPE_MAX,
 };
 
+#define RTE_TUNNEL_TYPE_NONE		RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN		RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE		RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO		RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE		RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG	RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI		RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX		RTE_ETH_TUNNEL_TYPE_MAX
+
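A sketch of the renamed tunnel types in use (4789 is the IANA VXLAN port,
shown only as an example value):

	/* Sketch: let the device classify UDP dst port 4789 as VXLAN on Rx. */
	static int
	example_add_vxlan_port(uint16_t port_id)
	{
		struct rte_eth_udp_tunnel tunnel_udp = {
			.udp_port = 4789,
			.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
		};

		return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
	}
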
 /* Deprecated API file for rte_eth_dev_filter_* functions */
 #include "rte_eth_ctrl.h"
 
@@ -1240,11 +1444,16 @@ enum rte_eth_tunnel_type {
  *  Memory space that can be configured to store Flow Director filters
  *  in the board memory.
  */
-enum rte_fdir_pballoc_type {
-	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
-	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
-	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+	RTE_ETH_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+	RTE_ETH_FDIR_PBALLOC_128K,     /**< 128k. */
+	RTE_ETH_FDIR_PBALLOC_256K,     /**< 256k. */
 };
+#define rte_fdir_pballoc_type	rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K	RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K	RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K	RTE_ETH_FDIR_PBALLOC_256K
 
 /**
  *  Select report mode of FDIR hash information in RX descriptors.
@@ -1261,9 +1470,9 @@ enum rte_fdir_status_mode {
  *
  * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
  */
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
 	enum rte_fdir_mode mode; /**< Flow Director mode. */
-	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+	enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
 	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
 	/** RX queue of packets matching a "drop" filter in perfect mode. */
 	uint8_t drop_queue;
@@ -1272,6 +1481,8 @@ struct rte_fdir_conf {
 	/**< Flex payload configuration. */
 };
 
+#define rte_fdir_conf rte_eth_fdir_conf
+
 /**
  * UDP tunneling configuration.
  *
@@ -1289,7 +1500,7 @@ struct rte_eth_udp_tunnel {
 /**
  * A structure used to enable/disable specific device interrupts.
  */
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint32_t lsc:1;
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
@@ -1298,18 +1509,20 @@ struct rte_intr_conf {
 	uint32_t rmv:1;
 };
 
+#define rte_intr_conf rte_eth_intr_conf
+
 /**
  * A structure used to configure an Ethernet port.
  * Depending upon the RX multi-queue mode, extra advanced
  * configuration settings may be needed.
  */
 struct rte_eth_conf {
-	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
-				used. ETH_LINK_SPEED_FIXED disables link
+	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
+				used. RTE_ETH_LINK_SPEED_FIXED disables link
 				autonegotiation, and a unique speed shall be
 				set. Otherwise, the bitmap defines the set of
 				speeds to be advertised. If the special value
-				ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
+				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
 				supported are advertised. */
 	struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
 	struct rte_eth_txmode txmode; /**< Port TX configuration. */
@@ -1335,48 +1548,70 @@ struct rte_eth_conf {
 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
 		/**< Port vmdq TX configuration. */
 	} tx_adv_conf; /**< Port TX DCB configuration (union). */
-	/** Currently,Priority Flow Control(PFC) are supported,if DCB with PFC
-	    is needed,and the variable must be set ETH_DCB_PFC_SUPPORT. */
+	/**
+	 * Currently, Priority Flow Control (PFC) is supported; if DCB with
+	 * PFC is needed, this field must be set to RTE_ETH_DCB_PFC_SUPPORT.
+	 */
 	uint32_t dcb_capability_en;
-	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
-	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+	struct rte_eth_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
+	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
 };
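
Pulling the renamed pieces together, a minimal configuration sketch (the
queue counts and the RSS hash selection are illustrative):

	/* Sketch: one Rx/Tx queue pair, RSS on IP, auto-negotiated speed,
	 * written purely with the new RTE_ETH_* names. */
	static int
	example_configure_port(uint16_t port_id)
	{
		struct rte_eth_conf port_conf = {
			.link_speeds = RTE_ETH_LINK_SPEED_AUTONEG,
			.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
			.rx_adv_conf = {
				.rss_conf = { .rss_hf = RTE_ETH_RSS_IP },
			},
		};

		return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	}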
 
 /**
  * RX offload capabilities of a device.
  */
-#define DEV_RX_OFFLOAD_VLAN_STRIP  0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT	0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER	0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND	0x00000400
-#define DEV_RX_OFFLOAD_SCATTER		0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP  0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP	RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM	RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO     0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO		RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP  0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP	RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP	RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT	0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT	RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER	0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER	RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND	0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER	0x00002000
+#define DEV_RX_OFFLOAD_SCATTER		RTE_ETH_RX_OFFLOAD_SCATTER
 /**
  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_RX_OFFLOAD_TIMESTAMP	0x00004000
-#define DEV_RX_OFFLOAD_SECURITY         0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC		0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM	0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH		0x00080000
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP	0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP	RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY     0x00008000
+#define DEV_RX_OFFLOAD_SECURITY		RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC	0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC		RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH	0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH	RTE_ETH_RX_OFFLOAD_RSS_HASH
 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
 
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				 DEV_RX_OFFLOAD_UDP_CKSUM | \
-				 DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			     DEV_RX_OFFLOAD_VLAN_FILTER | \
-			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-			     DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM	RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN	RTE_ETH_RX_OFFLOAD_VLAN
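
A capability-check sketch with the renamed Rx offload flags (the helper
name is illustrative):

	/* Sketch: return the checksum offload group if the port supports all
	 * of it, so the caller can OR it into rxmode.offloads. */
	static uint64_t
	example_rx_checksum_offloads(uint16_t port_id)
	{
		struct rte_eth_dev_info dev_info;

		if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
			return 0;
		if ((dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM) ==
				RTE_ETH_RX_OFFLOAD_CHECKSUM)
			return RTE_ETH_RX_OFFLOAD_CHECKSUM;
		return 0;
	}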
 
 /*
  * If new Rx offload capabilities are defined, they also must be
@@ -1386,52 +1621,74 @@ struct rte_eth_conf {
 /**
  * TX offload capabilities of a device.
  */
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM  0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO     0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO     0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
-#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT 0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT	RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM	RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO     0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO		RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO     0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO		RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT 0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT	RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT	RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
 /**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
  * tx queue without SW lock.
  */
-#define DEV_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS	RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 /**< Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 /**< Device supports optimization for fast release of mbufs.
  *   When set application must guarantee that per-queue all mbufs comes from
  *   the same mempool and has refcnt = 1.
  */
-#define DEV_TX_OFFLOAD_SECURITY         0x00020000
+#define RTE_ETH_TX_OFFLOAD_SECURITY         0x00020000
+#define DEV_TX_OFFLOAD_SECURITY	RTE_ETH_TX_OFFLOAD_SECURITY
 /**
  * Device supports generic UDP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO	RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
 /**
  * Device supports generic IP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
 /** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
 /**
  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP	RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
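
The same pattern on the Tx side, as a sketch:

	/* Sketch: enable fast mbuf free only when advertised; the caller must
	 * then honour the single-mempool, refcnt == 1 rule documented above. */
	static void
	example_tx_fast_free(uint16_t port_id, struct rte_eth_conf *port_conf)
	{
		struct rte_eth_dev_info dev_info;

		if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
			return;
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			port_conf->txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	}
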
@@ -1563,7 +1820,7 @@ struct rte_eth_dev_info {
 	uint16_t vmdq_pool_base;  /**< First ID of VMDQ pools. */
 	struct rte_eth_desc_lim rx_desc_lim;  /**< RX descriptors limits */
 	struct rte_eth_desc_lim tx_desc_lim;  /**< TX descriptors limits */
-	uint32_t speed_capa;  /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 	/** Configured number of rx/tx queues */
 	uint16_t nb_rx_queues; /**< Number of RX queues. */
 	uint16_t nb_tx_queues; /**< Number of TX queues. */
@@ -1667,8 +1924,10 @@ struct rte_eth_xstat_name {
 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
 };
 
-#define ETH_DCB_NUM_TCS    8
-#define ETH_MAX_VMDQ_POOL  64
+#define RTE_ETH_DCB_NUM_TCS    8
+#define ETH_DCB_NUM_TCS	RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL  64
+#define ETH_MAX_VMDQ_POOL	RTE_ETH_MAX_VMDQ_POOL
 
 /**
  * A structure used to get the information of queue and
@@ -1679,12 +1938,12 @@ struct rte_eth_dcb_tc_queue_mapping {
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 	/** rx queues assigned to tc per Pool */
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 };
 
 /**
@@ -1693,8 +1952,8 @@ struct rte_eth_dcb_tc_queue_mapping {
  */
 struct rte_eth_dcb_info {
 	uint8_t nb_tcs;        /**< number of TCs */
-	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
-	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
+	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
+	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
 	/** rx queues assigned to tc */
 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
 };
@@ -1718,7 +1977,7 @@ enum rte_eth_fec_mode {
 
 /* A structure used to get capabilities per link speed */
 struct rte_eth_fec_capa {
-	uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
+	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
 	uint32_t capa;  /**< FEC capabilities bitmask */
 };
 
@@ -1741,13 +2000,17 @@ struct rte_eth_fec_capa {
 
 /**@{@name L2 tunnel configuration */
 /**< l2 tunnel enable mask */
-#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define RTE_ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define ETH_L2_TUNNEL_ENABLE_MASK	RTE_ETH_L2_TUNNEL_ENABLE_MASK
 /**< l2 tunnel insertion mask */
-#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define RTE_ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define ETH_L2_TUNNEL_INSERTION_MASK	RTE_ETH_L2_TUNNEL_INSERTION_MASK
 /**< l2 tunnel stripping mask */
-#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define RTE_ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define ETH_L2_TUNNEL_STRIPPING_MASK	RTE_ETH_L2_TUNNEL_STRIPPING_MASK
 /**< l2 tunnel forwarding mask */
-#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define RTE_ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define ETH_L2_TUNNEL_FORWARDING_MASK	RTE_ETH_L2_TUNNEL_FORWARDING_MASK
 /**@}*/
 
 /**
@@ -2058,14 +2321,14 @@ uint16_t rte_eth_dev_count_total(void);
  * @param speed
  *   Numerical speed value in Mbps
  * @param duplex
- *   ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
+ *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
  * @return
  *   0 if the speed cannot be mapped
  */
 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 
 /**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2075,7 +2338,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
 
 /**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2169,7 +2432,7 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
- *   the DEV_RX_OFFLOAD_* flags.
+ *   the RTE_ETH_RX_OFFLOAD_* flags.
  *   If an offloading set in rx_conf->offloads
  *   hasn't been set in the input argument eth_conf->rxmode.offloads
  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
@@ -2746,7 +3009,7 @@ const char *rte_eth_link_speed_to_str(uint32_t link_speed);
  *
  * @param str
  *   A pointer to a string to be filled with textual representation of
- *   device status. At least ETH_LINK_MAX_STR_LEN bytes should be allocated to
+ *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
  *   store default link status text.
  * @param len
  *   Length of available memory at 'str' string.
@@ -3292,10 +3555,10 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
  *   The port identifier of the Ethernet device.
  * @param offload_mask
  *   The VLAN Offload bit mask can be mixed use with "OR"
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  * @return
  *   - (0) if successful.
  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
@@ -3311,10 +3574,10 @@ int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
  *   The port identifier of the Ethernet device.
  * @return
  *   - (>0) if successful. Bit mask to indicate
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  *   - (-ENODEV) if *port_id* invalid.
  */
 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
@@ -5339,7 +5602,7 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
  * of those packets whose transmission was effectively completed.
  *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
  * invoke this function concurrently on the same tx queue without SW lock.
  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
  *
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index d5bfdaaaf2ec..2b320092990b 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -2710,7 +2710,7 @@ struct rte_flow_action_rss {
 	 * through.
 	 */
 	uint32_t level;
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len; /**< Hash key length in bytes. */
 	uint32_t queue_num; /**< Number of entries in @p queue. */
 	const uint8_t *key; /**< Hash key. */
diff --git a/lib/gso/rte_gso.c b/lib/gso/rte_gso.c
index 0d02ec3cee05..119fdcac0b7f 100644
--- a/lib/gso/rte_gso.c
+++ b/lib/gso/rte_gso.c
@@ -15,13 +15,13 @@
 #include "gso_udp4.h"
 
 #define ILLEGAL_UDP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+	((((ctx)->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0) || \
 	 (ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
 
 #define ILLEGAL_TCP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+	((((ctx)->gso_types & (RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
 		(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
 
 int
@@ -54,28 +54,28 @@ rte_gso_segment(struct rte_mbuf *pkt,
 	ol_flags = pkt->ol_flags;
 
 	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
 			((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
-			 (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+			 (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_tunnel_udp4_segment(pkt, gso_size,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_UDP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_udp4_segment(pkt, gso_size, direct_pool,
 				indirect_pool, pkts_out, nb_pkts_out);
diff --git a/lib/gso/rte_gso.h b/lib/gso/rte_gso.h
index d93ee8e5b171..0a65afc11e64 100644
--- a/lib/gso/rte_gso.h
+++ b/lib/gso/rte_gso.h
@@ -52,11 +52,11 @@ struct rte_gso_ctx {
 	uint32_t gso_types;
 	/**< the bit mask of required GSO types. The GSO library
 	 * uses the same macros as that of describing device TX
-	 * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+	 * offloading capabilities (i.e. RTE_ETH_TX_OFFLOAD_*_TSO) for
 	 * gso_types.
 	 *
 	 * For example, if applications want to segment TCP/IPv4
-	 * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+	 * packets, set RTE_ETH_TX_OFFLOAD_TCP_TSO in gso_types.
 	 */
 	uint16_t gso_size;
 	/**< maximum size of an output GSO segment, including packet
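
Following the header's own example, a GSO context sketch with the renamed
flags (the mempool variables and segment size are placeholders):

	/* Sketch: segment plain TCP/IPv4 and VXLAN-tunnelled TCP into
	 * 1400-byte output segments. direct_pool/indirect_pool are assumed
	 * to be mempools created elsewhere by the application. */
	struct rte_gso_ctx gso_ctx = {
		.direct_pool = direct_pool,
		.indirect_pool = indirect_pool,
		.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO |
			     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO,
		.gso_size = 1400,
	};
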
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index d6f167994411..5a5b6b1e33c1 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -185,7 +185,7 @@ extern "C" {
  * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
  * HW capability, At minimum, the PMD should support
  * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
- * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ * if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
  */
 #define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))
 
@@ -208,7 +208,7 @@ extern "C" {
  * a) Fill outer_l2_len and outer_l3_len in mbuf.
  * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
  * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
- * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * 2) Configure RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
  */
 #define PKT_TX_OUTER_UDP_CKSUM     (1ULL << 41)
 
@@ -253,7 +253,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
  * or PKT_TX_TUNNEL_IPIP if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
@@ -266,7 +266,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
  * if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
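
For the generic UDP tunnel case above, the per-packet side looks roughly
like this (m is an assumed struct rte_mbuf pointer; the lengths must match
the real headers):

	/* Sketch: mark an mbuf for generic UDP tunnel TSO on a port that was
	 * configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO. The inner l2/l3/l4
	 * lengths and tso_segsz are set as for plain TSO (not shown). */
	m->outer_l2_len = sizeof(struct rte_ether_hdr);
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= PKT_TX_TUNNEL_UDP | PKT_TX_OUTER_IPV4 |
		       PKT_TX_IPV4 | PKT_TX_TCP_SEG;
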
diff --git a/lib/mbuf/rte_mbuf_dyn.h b/lib/mbuf/rte_mbuf_dyn.h
index fb03cf1dcf90..29abe8da53cf 100644
--- a/lib/mbuf/rte_mbuf_dyn.h
+++ b/lib/mbuf/rte_mbuf_dyn.h
@@ -37,7 +37,7 @@
  *   of the dynamic field to be registered:
  *   const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... };
  * - The application initializes the PMD, and asks for this feature
- *   at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in
+ *   at port initialization by passing RTE_ETH_RX_OFFLOAD_MY_FEATURE in
  *   rxconf. This will make the PMD to register the field by calling
  *   rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD
  *   stores the returned offset.
-- 
2.31.1


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5] ethdev: add namespace
  2021-10-18 15:43     ` [dpdk-dev] [PATCH v4] " Ferruh Yigit
@ 2021-10-20 19:23       ` Ferruh Yigit
  2021-10-22  2:02         ` [dpdk-dev] [PATCH v6] " Ferruh Yigit
  0 siblings, 1 reply; 32+ messages in thread
From: Ferruh Yigit @ 2021-10-20 19:23 UTC (permalink / raw)
  To: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Haiyue Wang,
	Beilei Xing, Matan Azrad, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Somalapuram Amaranath, Rasesh Mody, Shahed Shaikh,
	Bruce Richardson, Konstantin Ananyev, Ruifeng Wang,
	Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk, Shai Brandes,
	Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh, Gaetan Rivet,
	Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou,
	Jingjing Wu, Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang
  Cc: Ferruh Yigit, dev, Tyler Retzlaff, David Marchand

Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
way. The macros for backward compatibility can be removed in the next LTS.
Also update some struct names to have the 'rte_eth' prefix.

All internal components switched to using new names.

Syntax fixed on lines that this patch touches.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Wisam Jaddo <wisamm@nvidia.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
Cc: David Marchand <david.marchand@redhat.com>
Cc: Thomas Monjalon <thomas@monjalon.net>

v2:
* Updated internal components
* Removed deprecation notice

v3:
* Updated missing macros / structs that David highlighted
* Added release notes update

v4:
* rebased on latest next-net
* depends on https://patches.dpdk.org/user/todo/dpdk/?series=19744
* Scripts to update user code are not complete yet, although Aman shared
  some:
  https://patches.dpdk.org/project/dpdk/patch/20211008102949.70716-1-aman.deep.singh@intel.com/
  Sending a new version so the patch can still make -rc1; the scripts can
  follow later, before the release.

v5:
* rebased on latest next-net
---
 app/proc-info/main.c                          |    8 +-
 app/test-eventdev/test_perf_common.c          |    4 +-
 app/test-eventdev/test_pipeline_common.c      |   10 +-
 app/test-flow-perf/config.h                   |    2 +-
 app/test-pipeline/init.c                      |    8 +-
 app/test-pmd/cmdline.c                        |  286 ++---
 app/test-pmd/config.c                         |  200 ++--
 app/test-pmd/csumonly.c                       |   28 +-
 app/test-pmd/flowgen.c                        |    6 +-
 app/test-pmd/macfwd.c                         |    6 +-
 app/test-pmd/macswap_common.h                 |    6 +-
 app/test-pmd/parameters.c                     |   54 +-
 app/test-pmd/testpmd.c                        |   52 +-
 app/test-pmd/testpmd.h                        |    2 +-
 app/test-pmd/txonly.c                         |    6 +-
 app/test/test_ethdev_link.c                   |   68 +-
 app/test/test_event_eth_rx_adapter.c          |    4 +-
 app/test/test_kni.c                           |    2 +-
 app/test/test_link_bonding.c                  |    4 +-
 app/test/test_link_bonding_mode4.c            |    4 +-
 app/test/test_link_bonding_rssconf.c          |   28 +-
 app/test/test_pmd_perf.c                      |   12 +-
 app/test/virtual_pmd.c                        |   10 +-
 doc/guides/eventdevs/cnxk.rst                 |    2 +-
 doc/guides/eventdevs/octeontx2.rst            |    2 +-
 doc/guides/nics/af_packet.rst                 |    2 +-
 doc/guides/nics/bnxt.rst                      |   24 +-
 doc/guides/nics/enic.rst                      |    2 +-
 doc/guides/nics/features.rst                  |  114 +-
 doc/guides/nics/fm10k.rst                     |    6 +-
 doc/guides/nics/intel_vf.rst                  |   10 +-
 doc/guides/nics/ixgbe.rst                     |   12 +-
 doc/guides/nics/mlx5.rst                      |    4 +-
 doc/guides/nics/tap.rst                       |    2 +-
 .../generic_segmentation_offload_lib.rst      |    8 +-
 doc/guides/prog_guide/mbuf_lib.rst            |   18 +-
 doc/guides/prog_guide/poll_mode_drv.rst       |    8 +-
 doc/guides/prog_guide/rte_flow.rst            |   34 +-
 doc/guides/prog_guide/rte_security.rst        |    2 +-
 doc/guides/rel_notes/deprecation.rst          |   10 +-
 doc/guides/rel_notes/release_21_11.rst        |    3 +
 doc/guides/sample_app_ug/ipsec_secgw.rst      |    4 +-
 doc/guides/testpmd_app_ug/run_app.rst         |    2 +-
 drivers/bus/dpaa/include/process.h            |   16 +-
 drivers/common/cnxk/roc_npc.h                 |    2 +-
 drivers/net/af_packet/rte_eth_af_packet.c     |   20 +-
 drivers/net/af_xdp/rte_eth_af_xdp.c           |   12 +-
 drivers/net/ark/ark_ethdev.c                  |   16 +-
 drivers/net/atlantic/atl_ethdev.c             |   88 +-
 drivers/net/atlantic/atl_ethdev.h             |   18 +-
 drivers/net/atlantic/atl_rxtx.c               |    6 +-
 drivers/net/avp/avp_ethdev.c                  |   26 +-
 drivers/net/axgbe/axgbe_dev.c                 |    6 +-
 drivers/net/axgbe/axgbe_ethdev.c              |  104 +-
 drivers/net/axgbe/axgbe_ethdev.h              |   12 +-
 drivers/net/axgbe/axgbe_mdio.c                |    2 +-
 drivers/net/axgbe/axgbe_rxtx.c                |    6 +-
 drivers/net/bnx2x/bnx2x_ethdev.c              |   12 +-
 drivers/net/bnxt/bnxt.h                       |   62 +-
 drivers/net/bnxt/bnxt_ethdev.c                |  172 +--
 drivers/net/bnxt/bnxt_flow.c                  |    6 +-
 drivers/net/bnxt/bnxt_hwrm.c                  |  112 +-
 drivers/net/bnxt/bnxt_reps.c                  |    2 +-
 drivers/net/bnxt/bnxt_ring.c                  |    4 +-
 drivers/net/bnxt/bnxt_rxq.c                   |   28 +-
 drivers/net/bnxt/bnxt_rxr.c                   |    4 +-
 drivers/net/bnxt/bnxt_rxtx_vec_avx2.c         |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_common.h       |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c         |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c          |    2 +-
 drivers/net/bnxt/bnxt_txr.c                   |    4 +-
 drivers/net/bnxt/bnxt_vnic.c                  |   30 +-
 drivers/net/bnxt/rte_pmd_bnxt.c               |    8 +-
 drivers/net/bonding/eth_bond_private.h        |    4 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c     |   16 +-
 drivers/net/bonding/rte_eth_bond_api.c        |    6 +-
 drivers/net/bonding/rte_eth_bond_pmd.c        |   50 +-
 drivers/net/cnxk/cn10k_ethdev.c               |   42 +-
 drivers/net/cnxk/cn10k_rte_flow.c             |    2 +-
 drivers/net/cnxk/cn10k_rx.c                   |    4 +-
 drivers/net/cnxk/cn10k_tx.c                   |    4 +-
 drivers/net/cnxk/cn9k_ethdev.c                |   60 +-
 drivers/net/cnxk/cn9k_rx.c                    |    4 +-
 drivers/net/cnxk/cn9k_tx.c                    |    4 +-
 drivers/net/cnxk/cnxk_ethdev.c                |  112 +-
 drivers/net/cnxk/cnxk_ethdev.h                |   49 +-
 drivers/net/cnxk/cnxk_ethdev_devargs.c        |    6 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c            |  106 +-
 drivers/net/cnxk/cnxk_link.c                  |   14 +-
 drivers/net/cnxk/cnxk_ptp.c                   |    4 +-
 drivers/net/cnxk/cnxk_rte_flow.c              |    2 +-
 drivers/net/cxgbe/cxgbe.h                     |   46 +-
 drivers/net/cxgbe/cxgbe_ethdev.c              |   42 +-
 drivers/net/cxgbe/cxgbe_main.c                |   12 +-
 drivers/net/dpaa/dpaa_ethdev.c                |  180 +--
 drivers/net/dpaa/dpaa_ethdev.h                |   10 +-
 drivers/net/dpaa/dpaa_flow.c                  |   32 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |   47 +-
 drivers/net/dpaa2/dpaa2_ethdev.c              |  138 +--
 drivers/net/dpaa2/dpaa2_ethdev.h              |   22 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |    8 +-
 drivers/net/e1000/e1000_ethdev.h              |   18 +-
 drivers/net/e1000/em_ethdev.c                 |   64 +-
 drivers/net/e1000/em_rxtx.c                   |   38 +-
 drivers/net/e1000/igb_ethdev.c                |  158 +--
 drivers/net/e1000/igb_pf.c                    |    2 +-
 drivers/net/e1000/igb_rxtx.c                  |  116 +-
 drivers/net/ena/ena_ethdev.c                  |   70 +-
 drivers/net/ena/ena_ethdev.h                  |    4 +-
 drivers/net/ena/ena_rss.c                     |   74 +-
 drivers/net/enetc/enetc_ethdev.c              |   30 +-
 drivers/net/enic/enic.h                       |    2 +-
 drivers/net/enic/enic_ethdev.c                |   88 +-
 drivers/net/enic/enic_main.c                  |   40 +-
 drivers/net/enic/enic_res.c                   |   50 +-
 drivers/net/failsafe/failsafe.c               |    8 +-
 drivers/net/failsafe/failsafe_intr.c          |    4 +-
 drivers/net/failsafe/failsafe_ops.c           |   78 +-
 drivers/net/fm10k/fm10k.h                     |    4 +-
 drivers/net/fm10k/fm10k_ethdev.c              |  146 +--
 drivers/net/fm10k/fm10k_rxtx_vec.c            |    6 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c      |   22 +-
 drivers/net/hinic/hinic_pmd_ethdev.c          |  136 +--
 drivers/net/hinic/hinic_pmd_rx.c              |   36 +-
 drivers/net/hinic/hinic_pmd_rx.h              |   22 +-
 drivers/net/hns3/hns3_dcb.c                   |   14 +-
 drivers/net/hns3/hns3_ethdev.c                |  352 +++---
 drivers/net/hns3/hns3_ethdev.h                |   12 +-
 drivers/net/hns3/hns3_ethdev_vf.c             |  100 +-
 drivers/net/hns3/hns3_flow.c                  |    6 +-
 drivers/net/hns3/hns3_ptp.c                   |    2 +-
 drivers/net/hns3/hns3_rss.c                   |  108 +-
 drivers/net/hns3/hns3_rss.h                   |   28 +-
 drivers/net/hns3/hns3_rxtx.c                  |   30 +-
 drivers/net/hns3/hns3_rxtx.h                  |    2 +-
 drivers/net/hns3/hns3_rxtx_vec.c              |   10 +-
 drivers/net/i40e/i40e_ethdev.c                |  272 ++---
 drivers/net/i40e/i40e_ethdev.h                |   24 +-
 drivers/net/i40e/i40e_flow.c                  |   32 +-
 drivers/net/i40e/i40e_hash.c                  |  158 +--
 drivers/net/i40e/i40e_pf.c                    |   14 +-
 drivers/net/i40e/i40e_rxtx.c                  |    8 +-
 drivers/net/i40e/i40e_rxtx.h                  |    4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c       |    2 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h       |    8 +-
 drivers/net/i40e/i40e_vf_representor.c        |   48 +-
 drivers/net/iavf/iavf.h                       |   24 +-
 drivers/net/iavf/iavf_ethdev.c                |  178 +--
 drivers/net/iavf/iavf_hash.c                  |  320 +++---
 drivers/net/iavf/iavf_rxtx.c                  |    2 +-
 drivers/net/iavf/iavf_rxtx.h                  |   24 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c         |    4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c       |    6 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |    2 +-
 drivers/net/ice/ice_dcf.c                     |    2 +-
 drivers/net/ice/ice_dcf_ethdev.c              |   86 +-
 drivers/net/ice/ice_dcf_vf_representor.c      |   56 +-
 drivers/net/ice/ice_ethdev.c                  |  180 +--
 drivers/net/ice/ice_ethdev.h                  |   26 +-
 drivers/net/ice/ice_hash.c                    |  290 ++---
 drivers/net/ice/ice_rxtx.c                    |   16 +-
 drivers/net/ice/ice_rxtx_vec_avx2.c           |    2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c         |    4 +-
 drivers/net/ice/ice_rxtx_vec_common.h         |   28 +-
 drivers/net/ice/ice_rxtx_vec_sse.c            |    2 +-
 drivers/net/igc/igc_ethdev.c                  |  138 +--
 drivers/net/igc/igc_ethdev.h                  |   54 +-
 drivers/net/igc/igc_txrx.c                    |   48 +-
 drivers/net/ionic/ionic_ethdev.c              |  138 +--
 drivers/net/ionic/ionic_ethdev.h              |   12 +-
 drivers/net/ionic/ionic_lif.c                 |   36 +-
 drivers/net/ionic/ionic_rxtx.c                |   10 +-
 drivers/net/ipn3ke/ipn3ke_representor.c       |   64 +-
 drivers/net/ixgbe/ixgbe_ethdev.c              |  285 +++--
 drivers/net/ixgbe/ixgbe_ethdev.h              |   18 +-
 drivers/net/ixgbe/ixgbe_fdir.c                |   24 +-
 drivers/net/ixgbe/ixgbe_flow.c                |    2 +-
 drivers/net/ixgbe/ixgbe_ipsec.c               |   12 +-
 drivers/net/ixgbe/ixgbe_pf.c                  |   34 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                |  249 ++--
 drivers/net/ixgbe/ixgbe_rxtx.h                |    4 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |    2 +-
 drivers/net/ixgbe/ixgbe_tm.c                  |   16 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c      |   16 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.c             |   14 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.h             |    4 +-
 drivers/net/kni/rte_eth_kni.c                 |    8 +-
 drivers/net/liquidio/lio_ethdev.c             |  114 +-
 drivers/net/memif/memif_socket.c              |    2 +-
 drivers/net/memif/rte_eth_memif.c             |   16 +-
 drivers/net/mlx4/mlx4_ethdev.c                |   32 +-
 drivers/net/mlx4/mlx4_flow.c                  |   30 +-
 drivers/net/mlx4/mlx4_intr.c                  |    8 +-
 drivers/net/mlx4/mlx4_rxq.c                   |   18 +-
 drivers/net/mlx4/mlx4_txq.c                   |   24 +-
 drivers/net/mlx5/linux/mlx5_ethdev_os.c       |   54 +-
 drivers/net/mlx5/linux/mlx5_os.c              |    6 +-
 drivers/net/mlx5/mlx5.c                       |    4 +-
 drivers/net/mlx5/mlx5.h                       |    2 +-
 drivers/net/mlx5/mlx5_defs.h                  |    6 +-
 drivers/net/mlx5/mlx5_ethdev.c                |    6 +-
 drivers/net/mlx5/mlx5_flow.c                  |   54 +-
 drivers/net/mlx5/mlx5_flow.h                  |   12 +-
 drivers/net/mlx5/mlx5_flow_dv.c               |   44 +-
 drivers/net/mlx5/mlx5_flow_verbs.c            |    4 +-
 drivers/net/mlx5/mlx5_rss.c                   |   10 +-
 drivers/net/mlx5/mlx5_rxq.c                   |   40 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h              |    8 +-
 drivers/net/mlx5/mlx5_tx.c                    |   30 +-
 drivers/net/mlx5/mlx5_txq.c                   |   58 +-
 drivers/net/mlx5/mlx5_vlan.c                  |    4 +-
 drivers/net/mlx5/windows/mlx5_os.c            |    4 +-
 drivers/net/mvneta/mvneta_ethdev.c            |   32 +-
 drivers/net/mvneta/mvneta_ethdev.h            |   10 +-
 drivers/net/mvneta/mvneta_rxtx.c              |    2 +-
 drivers/net/mvpp2/mrvl_ethdev.c               |  112 +-
 drivers/net/netvsc/hn_ethdev.c                |   70 +-
 drivers/net/netvsc/hn_rndis.c                 |   50 +-
 drivers/net/nfb/nfb_ethdev.c                  |   20 +-
 drivers/net/nfb/nfb_rx.c                      |    2 +-
 drivers/net/nfp/nfp_common.c                  |  122 +-
 drivers/net/nfp/nfp_ethdev.c                  |    2 +-
 drivers/net/nfp/nfp_ethdev_vf.c               |    2 +-
 drivers/net/ngbe/ngbe_ethdev.c                |   50 +-
 drivers/net/null/rte_eth_null.c               |   28 +-
 drivers/net/octeontx/octeontx_ethdev.c        |   74 +-
 drivers/net/octeontx/octeontx_ethdev.h        |   30 +-
 drivers/net/octeontx/octeontx_ethdev_ops.c    |   26 +-
 drivers/net/octeontx2/otx2_ethdev.c           |   96 +-
 drivers/net/octeontx2/otx2_ethdev.h           |   64 +-
 drivers/net/octeontx2/otx2_ethdev_devargs.c   |   12 +-
 drivers/net/octeontx2/otx2_ethdev_ops.c       |   14 +-
 drivers/net/octeontx2/otx2_ethdev_sec.c       |    8 +-
 drivers/net/octeontx2/otx2_flow.c             |    2 +-
 drivers/net/octeontx2/otx2_flow_ctrl.c        |   36 +-
 drivers/net/octeontx2/otx2_flow_parse.c       |    4 +-
 drivers/net/octeontx2/otx2_link.c             |   40 +-
 drivers/net/octeontx2/otx2_mcast.c            |    2 +-
 drivers/net/octeontx2/otx2_ptp.c              |    4 +-
 drivers/net/octeontx2/otx2_rss.c              |   70 +-
 drivers/net/octeontx2/otx2_rx.c               |    4 +-
 drivers/net/octeontx2/otx2_tx.c               |    2 +-
 drivers/net/octeontx2/otx2_vlan.c             |   42 +-
 drivers/net/octeontx_ep/otx_ep_ethdev.c       |    6 +-
 drivers/net/octeontx_ep/otx_ep_rxtx.c         |    6 +-
 drivers/net/pcap/pcap_ethdev.c                |   12 +-
 drivers/net/pfe/pfe_ethdev.c                  |   18 +-
 drivers/net/qede/base/mcp_public.h            |    4 +-
 drivers/net/qede/qede_ethdev.c                |  156 +--
 drivers/net/qede/qede_filter.c                |   42 +-
 drivers/net/qede/qede_rxtx.c                  |    2 +-
 drivers/net/qede/qede_rxtx.h                  |   16 +-
 drivers/net/ring/rte_eth_ring.c               |   20 +-
 drivers/net/sfc/sfc.c                         |   30 +-
 drivers/net/sfc/sfc_ef100_rx.c                |   10 +-
 drivers/net/sfc/sfc_ef100_tx.c                |   20 +-
 drivers/net/sfc/sfc_ef10_essb_rx.c            |    4 +-
 drivers/net/sfc/sfc_ef10_rx.c                 |    8 +-
 drivers/net/sfc/sfc_ef10_tx.c                 |   32 +-
 drivers/net/sfc/sfc_ethdev.c                  |   50 +-
 drivers/net/sfc/sfc_flow.c                    |    2 +-
 drivers/net/sfc/sfc_port.c                    |   52 +-
 drivers/net/sfc/sfc_repr.c                    |   10 +-
 drivers/net/sfc/sfc_rx.c                      |   50 +-
 drivers/net/sfc/sfc_tx.c                      |   50 +-
 drivers/net/softnic/rte_eth_softnic.c         |   12 +-
 drivers/net/szedata2/rte_eth_szedata2.c       |   14 +-
 drivers/net/tap/rte_eth_tap.c                 |  104 +-
 drivers/net/tap/tap_rss.h                     |    2 +-
 drivers/net/thunderx/nicvf_ethdev.c           |  102 +-
 drivers/net/thunderx/nicvf_ethdev.h           |   40 +-
 drivers/net/txgbe/txgbe_ethdev.c              |  242 ++--
 drivers/net/txgbe/txgbe_ethdev.h              |   18 +-
 drivers/net/txgbe/txgbe_ethdev_vf.c           |   24 +-
 drivers/net/txgbe/txgbe_fdir.c                |   20 +-
 drivers/net/txgbe/txgbe_flow.c                |    2 +-
 drivers/net/txgbe/txgbe_ipsec.c               |   12 +-
 drivers/net/txgbe/txgbe_pf.c                  |   34 +-
 drivers/net/txgbe/txgbe_rxtx.c                |  308 ++---
 drivers/net/txgbe/txgbe_rxtx.h                |    4 +-
 drivers/net/txgbe/txgbe_tm.c                  |   16 +-
 drivers/net/vhost/rte_eth_vhost.c             |   16 +-
 drivers/net/virtio/virtio_ethdev.c            |  124 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c          |   72 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h          |   16 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c            |   16 +-
 examples/bbdev_app/main.c                     |    6 +-
 examples/bond/main.c                          |   14 +-
 examples/distributor/main.c                   |   12 +-
 examples/ethtool/ethtool-app/main.c           |    2 +-
 examples/ethtool/lib/rte_ethtool.c            |   18 +-
 .../pipeline_worker_generic.c                 |   16 +-
 .../eventdev_pipeline/pipeline_worker_tx.c    |   12 +-
 examples/flow_classify/flow_classify.c        |    4 +-
 examples/flow_filtering/main.c                |   16 +-
 examples/ioat/ioatfwd.c                       |    8 +-
 examples/ip_fragmentation/main.c              |   12 +-
 examples/ip_pipeline/link.c                   |   20 +-
 examples/ip_reassembly/main.c                 |   18 +-
 examples/ipsec-secgw/ipsec-secgw.c            |   32 +-
 examples/ipsec-secgw/sa.c                     |    8 +-
 examples/ipv4_multicast/main.c                |    6 +-
 examples/kni/main.c                           |    8 +-
 examples/l2fwd-crypto/main.c                  |   10 +-
 examples/l2fwd-event/l2fwd_common.c           |   10 +-
 examples/l2fwd-event/main.c                   |    2 +-
 examples/l2fwd-jobstats/main.c                |    8 +-
 examples/l2fwd-keepalive/main.c               |    8 +-
 examples/l2fwd/main.c                         |    8 +-
 examples/l3fwd-acl/main.c                     |   18 +-
 examples/l3fwd-graph/main.c                   |   14 +-
 examples/l3fwd-power/main.c                   |   16 +-
 examples/l3fwd/l3fwd_event.c                  |    4 +-
 examples/l3fwd/main.c                         |   18 +-
 examples/link_status_interrupt/main.c         |   10 +-
 .../client_server_mp/mp_server/init.c         |    4 +-
 examples/multi_process/symmetric_mp/main.c    |   14 +-
 examples/ntb/ntb_fwd.c                        |    6 +-
 examples/packet_ordering/main.c               |    4 +-
 .../performance-thread/l3fwd-thread/main.c    |   16 +-
 examples/pipeline/obj.c                       |   20 +-
 examples/ptpclient/ptpclient.c                |   10 +-
 examples/qos_meter/main.c                     |   16 +-
 examples/qos_sched/init.c                     |    6 +-
 examples/rxtx_callbacks/main.c                |    8 +-
 examples/server_node_efd/server/init.c        |    8 +-
 examples/skeleton/basicfwd.c                  |    4 +-
 examples/vhost/main.c                         |   26 +-
 examples/vm_power_manager/main.c              |    6 +-
 examples/vmdq/main.c                          |   20 +-
 examples/vmdq_dcb/main.c                      |   40 +-
 lib/ethdev/ethdev_driver.h                    |   36 +-
 lib/ethdev/rte_ethdev.c                       |  181 ++-
 lib/ethdev/rte_ethdev.h                       | 1021 +++++++++++------
 lib/ethdev/rte_flow.h                         |    2 +-
 lib/gso/rte_gso.c                             |   20 +-
 lib/gso/rte_gso.h                             |    4 +-
 lib/mbuf/rte_mbuf_core.h                      |    8 +-
 lib/mbuf/rte_mbuf_dyn.h                       |    2 +-
 339 files changed, 6639 insertions(+), 6382 deletions(-)
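
A note for reviewers ahead of the hunks: the rename is strictly mechanical, and
the old spellings are expected to keep compiling through alias macros kept in
lib/ethdev/rte_ethdev.h. A minimal sketch of that aliasing pattern; the three
names here are picked for illustration only, the header diff carries the full
set:

	/* sketch: each legacy name aliases its RTE_ETH-prefixed replacement
	 * with the same value, so existing applications build unchanged */
	#define ETH_MQ_RX_RSS           RTE_ETH_MQ_RX_RSS
	#define DEV_RX_OFFLOAD_CHECKSUM RTE_ETH_RX_OFFLOAD_CHECKSUM
	#define DEV_TX_OFFLOAD_TCP_TSO  RTE_ETH_TX_OFFLOAD_TCP_TSO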

diff --git a/app/proc-info/main.c b/app/proc-info/main.c
index a8e928fa9ff3..963b6aa5c589 100644
--- a/app/proc-info/main.c
+++ b/app/proc-info/main.c
@@ -757,11 +757,11 @@ show_port(void)
 		}
 
 		ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
-		if (ret == 0 && fc_conf.mode != RTE_FC_NONE)  {
+		if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE)  {
 			printf("\t  -- flow control mode %s%s high %u low %u pause %u%s%s\n",
-			       fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
-			       fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
-			       fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+			       fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+			       fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+			       fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
 			       fc_conf.autoneg ? " auto" : "",
 			       fc_conf.high_water,
 			       fc_conf.low_water,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 660d5a0364b6..31d1b0e14653 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -668,13 +668,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct test_perf *t = evt_test_priv(test);
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.split_hdr_size = 0,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 2775e72c580d..d202091077a6 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
@@ -223,7 +223,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 			local_port_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_RSS_HASH;
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 		ret = rte_eth_dev_info_get(i, &dev_info);
 		if (ret != 0) {
@@ -233,9 +233,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 
 		/* Enable mbuf fast free if PMD has the capability. */
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h
index a14d4e05e185..4249b6175b82 100644
--- a/app/test-flow-perf/config.h
+++ b/app/test-flow-perf/config.h
@@ -5,7 +5,7 @@
 #define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
 
 /* Configuration */
 #define RXQ_NUM 4
diff --git a/app/test-pipeline/init.c b/app/test-pipeline/init.c
index fe37d63730c6..c73801904103 100644
--- a/app/test-pipeline/init.c
+++ b/app/test-pipeline/init.c
@@ -70,16 +70,16 @@ struct app_params app = {
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -178,7 +178,7 @@ app_ports_check_link(void)
 		RTE_LOG(INFO, USER1, "Port %u %s\n",
 			port,
 			link_status_text);
-		if (link.link_status == ETH_LINK_DOWN)
+		if (link.link_status == RTE_ETH_LINK_DOWN)
 			all_ports_up = 0;
 	}
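
The cmdline.c hunks that follow show the link-speed flags under their new
names: a concrete speed bit is OR'd with RTE_ETH_LINK_SPEED_FIXED to disable
autonegotiation. A one-line sketch of the same pattern, for orientation only
(not taken from the patch):

	/* fixed 10G, autoneg off, mirroring what testpmd does below */
	uint32_t speed = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_FIXED;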
 
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 3221f6e1aa40..ebea13f86ab0 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1478,51 +1478,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
 	int duplex;
 
 	if (!strcmp(duplexstr, "half")) {
-		duplex = ETH_LINK_HALF_DUPLEX;
+		duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	} else if (!strcmp(duplexstr, "full")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else if (!strcmp(duplexstr, "auto")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		fprintf(stderr, "Unknown duplex parameter\n");
 		return -1;
 	}
 
 	if (!strcmp(speedstr, "10")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
 	} else if (!strcmp(speedstr, "100")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
 	} else {
-		if (duplex != ETH_LINK_FULL_DUPLEX) {
+		if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
 			fprintf(stderr, "Invalid speed/duplex parameters\n");
 			return -1;
 		}
 		if (!strcmp(speedstr, "1000")) {
-			*speed = ETH_LINK_SPEED_1G;
+			*speed = RTE_ETH_LINK_SPEED_1G;
 		} else if (!strcmp(speedstr, "10000")) {
-			*speed = ETH_LINK_SPEED_10G;
+			*speed = RTE_ETH_LINK_SPEED_10G;
 		} else if (!strcmp(speedstr, "25000")) {
-			*speed = ETH_LINK_SPEED_25G;
+			*speed = RTE_ETH_LINK_SPEED_25G;
 		} else if (!strcmp(speedstr, "40000")) {
-			*speed = ETH_LINK_SPEED_40G;
+			*speed = RTE_ETH_LINK_SPEED_40G;
 		} else if (!strcmp(speedstr, "50000")) {
-			*speed = ETH_LINK_SPEED_50G;
+			*speed = RTE_ETH_LINK_SPEED_50G;
 		} else if (!strcmp(speedstr, "100000")) {
-			*speed = ETH_LINK_SPEED_100G;
+			*speed = RTE_ETH_LINK_SPEED_100G;
 		} else if (!strcmp(speedstr, "200000")) {
-			*speed = ETH_LINK_SPEED_200G;
+			*speed = RTE_ETH_LINK_SPEED_200G;
 		} else if (!strcmp(speedstr, "auto")) {
-			*speed = ETH_LINK_SPEED_AUTONEG;
+			*speed = RTE_ETH_LINK_SPEED_AUTONEG;
 		} else {
 			fprintf(stderr, "Unknown speed parameter\n");
 			return -1;
 		}
 	}
 
-	if (*speed != ETH_LINK_SPEED_AUTONEG)
-		*speed |= ETH_LINK_SPEED_FIXED;
+	if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+		*speed |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return 0;
 }
@@ -2166,33 +2166,33 @@ cmd_config_rss_parsed(void *parsed_result,
 	int ret;
 
 	if (!strcmp(res->value, "all"))
-		rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
-			ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
-			ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
-			ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
-			ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+			RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+			RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+			RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+			RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "eth"))
-		rss_conf.rss_hf = ETH_RSS_ETH;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH;
 	else if (!strcmp(res->value, "vlan"))
-		rss_conf.rss_hf = ETH_RSS_VLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
 	else if (!strcmp(res->value, "ip"))
-		rss_conf.rss_hf = ETH_RSS_IP;
+		rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	else if (!strcmp(res->value, "udp"))
-		rss_conf.rss_hf = ETH_RSS_UDP;
+		rss_conf.rss_hf = RTE_ETH_RSS_UDP;
 	else if (!strcmp(res->value, "tcp"))
-		rss_conf.rss_hf = ETH_RSS_TCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_TCP;
 	else if (!strcmp(res->value, "sctp"))
-		rss_conf.rss_hf = ETH_RSS_SCTP;
+		rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
 	else if (!strcmp(res->value, "ether"))
-		rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
 	else if (!strcmp(res->value, "port"))
-		rss_conf.rss_hf = ETH_RSS_PORT;
+		rss_conf.rss_hf = RTE_ETH_RSS_PORT;
 	else if (!strcmp(res->value, "vxlan"))
-		rss_conf.rss_hf = ETH_RSS_VXLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
 	else if (!strcmp(res->value, "geneve"))
-		rss_conf.rss_hf = ETH_RSS_GENEVE;
+		rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
 	else if (!strcmp(res->value, "nvgre"))
-		rss_conf.rss_hf = ETH_RSS_NVGRE;
+		rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
 	else if (!strcmp(res->value, "l3-pre32"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
 	else if (!strcmp(res->value, "l3-pre40"))
@@ -2206,46 +2206,46 @@ cmd_config_rss_parsed(void *parsed_result,
 	else if (!strcmp(res->value, "l3-pre96"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
 	else if (!strcmp(res->value, "l3-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
 	else if (!strcmp(res->value, "l3-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
 	else if (!strcmp(res->value, "l4-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
 	else if (!strcmp(res->value, "l2-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
 	else if (!strcmp(res->value, "l2-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
 	else if (!strcmp(res->value, "l2tpv3"))
-		rss_conf.rss_hf = ETH_RSS_L2TPV3;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
 	else if (!strcmp(res->value, "esp"))
-		rss_conf.rss_hf = ETH_RSS_ESP;
+		rss_conf.rss_hf = RTE_ETH_RSS_ESP;
 	else if (!strcmp(res->value, "ah"))
-		rss_conf.rss_hf = ETH_RSS_AH;
+		rss_conf.rss_hf = RTE_ETH_RSS_AH;
 	else if (!strcmp(res->value, "pfcp"))
-		rss_conf.rss_hf = ETH_RSS_PFCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
 	else if (!strcmp(res->value, "pppoe"))
-		rss_conf.rss_hf = ETH_RSS_PPPOE;
+		rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
 	else if (!strcmp(res->value, "gtpu"))
-		rss_conf.rss_hf = ETH_RSS_GTPU;
+		rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
 	else if (!strcmp(res->value, "ecpri"))
-		rss_conf.rss_hf = ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "mpls"))
-		rss_conf.rss_hf = ETH_RSS_MPLS;
+		rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
 	else if (!strcmp(res->value, "ipv4-chksum"))
-		rss_conf.rss_hf = ETH_RSS_IPV4_CHKSUM;
+		rss_conf.rss_hf = RTE_ETH_RSS_IPV4_CHKSUM;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "level-default")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
 	} else if (!strcmp(res->value, "level-outer")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
 	} else if (!strcmp(res->value, "level-inner")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
 	} else if (!strcmp(res->value, "default"))
 		use_default = 1;
 	else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@@ -2982,8 +2982,8 @@ parse_reta_config(const char *str,
 			return -1;
 		}
 
-		idx = hash_index / RTE_RETA_GROUP_SIZE;
-		shift = hash_index % RTE_RETA_GROUP_SIZE;
+		idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
+		shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[idx].mask |= (1ULL << shift);
 		reta_conf[idx].reta[shift] = nb_queue;
 	}
@@ -3012,10 +3012,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
 	} else
 		printf("The reta size of port %d is %u\n",
 			res->port_id, dev_info.reta_size);
-	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+	if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		fprintf(stderr,
 			"Currently do not support more than %u entries of redirection table\n",
-			ETH_RSS_RETA_SIZE_512);
+			RTE_ETH_RSS_RETA_SIZE_512);
 		return;
 	}
 
@@ -3086,8 +3086,8 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
 	char *end;
 	char *str_fld[8];
 	uint16_t i;
-	uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 	int ret;
 
 	p = strchr(p0, '(');
@@ -3132,7 +3132,7 @@ cmd_showport_reta_parsed(void *parsed_result,
 	if (ret != 0)
 		return;
 
-	max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+	max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
 	if (res->size == 0 || res->size > max_reta_size) {
 		fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
 			res->size, max_reta_size);
@@ -3272,7 +3272,7 @@ cmd_config_dcb_parsed(void *parsed_result,
 		return;
 	}
 
-	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+	if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
 		fprintf(stderr,
 			"The invalid number of traffic class, only 4 or 8 allowed.\n");
 		return;
@@ -4276,9 +4276,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
 	enum rte_vlan_type vlan_type;
 
 	if (!strcmp(res->vlan_type, "inner"))
-		vlan_type = ETH_VLAN_TYPE_INNER;
+		vlan_type = RTE_ETH_VLAN_TYPE_INNER;
 	else if (!strcmp(res->vlan_type, "outer"))
-		vlan_type = ETH_VLAN_TYPE_OUTER;
+		vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
 	else {
 		fprintf(stderr, "Unknown vlan type\n");
 		return;
@@ -4615,55 +4615,55 @@ csum_show(int port_id)
 	printf("Parse tunnel is %s\n",
 		(ports[port_id].parse_tunnel) ? "on" : "off");
 	printf("IP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
 	printf("UDP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
 	printf("TCP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
 	printf("SCTP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
 	printf("Outer-Ip checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
 	printf("Outer-Udp checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
 
 	/* display warnings if configuration is not supported by the NIC */
 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
 	if (ret != 0)
 		return;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware UDP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware TCP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 			== 0) {
 		fprintf(stderr,
 			"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@@ -4713,8 +4713,8 @@ cmd_csum_parsed(void *parsed_result,
 
 		if (!strcmp(res->proto, "ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_IPV4_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"IP checksum offload is not supported by port %u\n",
@@ -4722,8 +4722,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_UDP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"UDP checksum offload is not supported by port %u\n",
@@ -4731,8 +4731,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "tcp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_TCP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"TCP checksum offload is not supported by port %u\n",
@@ -4740,8 +4740,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "sctp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_SCTP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"SCTP checksum offload is not supported by port %u\n",
@@ -4749,9 +4749,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer IP checksum offload is not supported by port %u\n",
@@ -4759,9 +4759,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer UDP checksum offload is not supported by port %u\n",
@@ -4916,7 +4916,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr, "Error: TSO is not supported by port %d\n",
 			res->port_id);
 		return;
@@ -4924,11 +4924,11 @@ cmd_tso_set_parsed(void *parsed_result,
 
 	if (ports[res->port_id].tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_TCP_TSO;
+						~RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO for non-tunneled packets is disabled\n");
 	} else {
 		ports[res->port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_TCP_TSO;
+						RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO segment size for non-tunneled packets is %d\n",
 			ports[res->port_id].tso_segsz);
 	}
@@ -4940,7 +4940,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr,
 			"Warning: TSO enabled but not supported by port %d\n",
 			res->port_id);
@@ -5011,27 +5011,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
 	if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
 		return dev_info;
 
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
 		fprintf(stderr,
 			"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
@@ -5059,20 +5059,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 	dev_info = check_tunnel_tso_nic_support(res->port_id);
 	if (ports[res->port_id].tunnel_tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-			~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IP_TNL_TSO |
-			  DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 		printf("TSO for tunneled packets is disabled\n");
 	} else {
-		uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-					 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IP_TNL_TSO |
-					 DEV_TX_OFFLOAD_UDP_TNL_TSO);
+		uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 
 		ports[res->port_id].dev_conf.txmode.offloads |=
 			(tso_offloads & dev_info.tx_offload_capa);
@@ -5095,7 +5095,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 			fprintf(stderr,
 				"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
 		if (!(ports[res->port_id].dev_conf.txmode.offloads &
-		      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+		      RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 			fprintf(stderr,
 				"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
 	}
@@ -7227,9 +7227,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
 		return;
 	}
 
-	if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		rx_fc_en = true;
-	if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		tx_fc_en = true;
 
 	printf("\n%s Flow control infos for port %-2d %s\n",
@@ -7507,12 +7507,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
-			{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	/* Partial command line, retrieve current configuration */
@@ -7525,11 +7525,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 			return;
 		}
 
-		if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			rx_fc_en = 1;
-		if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			tx_fc_en = 1;
 	}
 
@@ -7597,12 +7597,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
-		{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+		{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@@ -9250,13 +9250,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
 	int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
 	if (!strcmp(res->what,"rxmode")) {
 		if (!strcmp(res->mode, "AUPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
 		else if (!strcmp(res->mode, "ROPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
 		else if (!strcmp(res->mode, "BAM"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
 		else if (!strncmp(res->mode, "MPE",3))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 	}
 
 	RTE_SET_USED(is_on);
@@ -9656,7 +9656,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
 	int ret;
 
 	tunnel_udp.udp_port = res->udp_port;
-	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+	tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 
 	if (!strcmp(res->what, "add"))
 		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9722,13 +9722,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
 	tunnel_udp.udp_port = res->udp_port;
 
 	if (!strcmp(res->tunnel_type, "vxlan")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 	} else if (!strcmp(res->tunnel_type, "geneve")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
 	} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
 	} else if (!strcmp(res->tunnel_type, "ecpri")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
 	} else {
 		fprintf(stderr, "Invalid tunnel type\n");
 		return;
@@ -11859,7 +11859,7 @@ cmd_set_macsec_offload_on_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
 #endif
@@ -11870,7 +11870,7 @@ cmd_set_macsec_offload_on_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MACSEC_INSERT;
+						RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
@@ -11956,7 +11956,7 @@ cmd_set_macsec_offload_off_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_disable(port_id);
 #endif
@@ -11964,7 +11964,7 @@ cmd_set_macsec_offload_off_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_MACSEC_INSERT;
+						~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 23aa334cda0f..f8ddfe60cd58 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -86,62 +86,62 @@ static const struct {
 };
 
 const struct rss_type_info rss_type_table[] = {
-	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
-		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
-		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
-		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
 	{ "none", 0 },
-	{ "eth", ETH_RSS_ETH },
-	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
-	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
-	{ "vlan", ETH_RSS_VLAN },
-	{ "s-vlan", ETH_RSS_S_VLAN },
-	{ "c-vlan", ETH_RSS_C_VLAN },
-	{ "ipv4", ETH_RSS_IPV4 },
-	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
-	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
-	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
-	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
-	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
-	{ "ipv6", ETH_RSS_IPV6 },
-	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
-	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
-	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
-	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
-	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
-	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
-	{ "ipv6-ex", ETH_RSS_IPV6_EX },
-	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
-	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
-	{ "port", ETH_RSS_PORT },
-	{ "vxlan", ETH_RSS_VXLAN },
-	{ "geneve", ETH_RSS_GENEVE },
-	{ "nvgre", ETH_RSS_NVGRE },
-	{ "ip", ETH_RSS_IP },
-	{ "udp", ETH_RSS_UDP },
-	{ "tcp", ETH_RSS_TCP },
-	{ "sctp", ETH_RSS_SCTP },
-	{ "tunnel", ETH_RSS_TUNNEL },
+	{ "eth", RTE_ETH_RSS_ETH },
+	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+	{ "vlan", RTE_ETH_RSS_VLAN },
+	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
+	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
+	{ "ipv4", RTE_ETH_RSS_IPV4 },
+	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", RTE_ETH_RSS_IPV6 },
+	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+	{ "port", RTE_ETH_RSS_PORT },
+	{ "vxlan", RTE_ETH_RSS_VXLAN },
+	{ "geneve", RTE_ETH_RSS_GENEVE },
+	{ "nvgre", RTE_ETH_RSS_NVGRE },
+	{ "ip", RTE_ETH_RSS_IP },
+	{ "udp", RTE_ETH_RSS_UDP },
+	{ "tcp", RTE_ETH_RSS_TCP },
+	{ "sctp", RTE_ETH_RSS_SCTP },
+	{ "tunnel", RTE_ETH_RSS_TUNNEL },
 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
-	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
-	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
-	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
-	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
-	{ "esp", ETH_RSS_ESP },
-	{ "ah", ETH_RSS_AH },
-	{ "l2tpv3", ETH_RSS_L2TPV3 },
-	{ "pfcp", ETH_RSS_PFCP },
-	{ "pppoe", ETH_RSS_PPPOE },
-	{ "gtpu", ETH_RSS_GTPU },
-	{ "ecpri", ETH_RSS_ECPRI },
-	{ "mpls", ETH_RSS_MPLS },
-	{ "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
-	{ "l4-chksum", ETH_RSS_L4_CHKSUM },
+	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+	{ "esp", RTE_ETH_RSS_ESP },
+	{ "ah", RTE_ETH_RSS_AH },
+	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+	{ "pfcp", RTE_ETH_RSS_PFCP },
+	{ "pppoe", RTE_ETH_RSS_PPPOE },
+	{ "gtpu", RTE_ETH_RSS_GTPU },
+	{ "ecpri", RTE_ETH_RSS_ECPRI },
+	{ "mpls", RTE_ETH_RSS_MPLS },
+	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
 	{ NULL, 0 },
 };
 
@@ -538,39 +538,39 @@ static void
 device_infos_display_speeds(uint32_t speed_capa)
 {
 	printf("\n\tDevice speed capability:");
-	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
 		printf(" Autonegotiate (all speeds)");
-	if (speed_capa & ETH_LINK_SPEED_FIXED)
+	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
 		printf(" Disable autonegotiate (fixed speed)  ");
-	if (speed_capa & ETH_LINK_SPEED_10M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
 		printf(" 10 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_10M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
 		printf(" 10 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
 		printf(" 100 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
 		printf(" 100 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_1G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
 		printf(" 1 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_2_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
 		printf(" 2.5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
 		printf(" 5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_10G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
 		printf(" 10 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_20G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
 		printf(" 20 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_25G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
 		printf(" 25 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_40G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
 		printf(" 40 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_50G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
 		printf(" 50 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_56G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
 		printf(" 56 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_100G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
 		printf(" 100 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_200G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
 		printf(" 200 Gbps  ");
 }
 
@@ -700,9 +700,9 @@ port_infos_display(portid_t port_id)
 
 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
-	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 	       ("full-duplex") : ("half-duplex"));
-	printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 	       ("On") : ("Off"));
 
 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
@@ -720,22 +720,22 @@ port_infos_display(portid_t port_id)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 	if (vlan_offload >= 0){
 		printf("VLAN offload: \n");
-		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
 			printf("  strip on, ");
 		else
 			printf("  strip off, ");
 
-		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
 			printf("filter on, ");
 		else
 			printf("filter off, ");
 
-		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
 			printf("extend on, ");
 		else
 			printf("extend off, ");
 
-		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
 			printf("qinq strip on\n");
 		else
 			printf("qinq strip off\n");
@@ -2919,8 +2919,8 @@ port_rss_reta_info(portid_t port_id,
 	}
 
 	for (i = 0; i < nb_entries; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
@@ -3288,7 +3288,7 @@ dcb_fwd_config_setup(void)
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
 		fwd_lcores[lc_id]->stream_nb = 0;
 		fwd_lcores[lc_id]->stream_idx = sm_id;
-		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
 			/* if the nb_queue is zero, means this tc is
 			 * not enabled on the POOL
 			 */
@@ -4351,11 +4351,11 @@ vlan_extend_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	} else {
-		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4381,11 +4381,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4426,11 +4426,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	} else {
-		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4456,11 +4456,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	} else {
-		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4530,7 +4530,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 		return;
 
 	if (ports[port_id].dev_conf.txmode.offloads &
-	    DEV_TX_OFFLOAD_QINQ_INSERT) {
+	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
 		return;
 	}
@@ -4539,7 +4539,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: vlan insert is not supported by port %d\n",
 			port_id);
@@ -4547,7 +4547,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	ports[port_id].tx_vlan_id = vlan_id;
 }
 
@@ -4566,7 +4566,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: qinq insert not supported by port %d\n",
 			port_id);
@@ -4574,8 +4574,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
-						    DEV_TX_OFFLOAD_QINQ_INSERT);
+	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = vlan_id;
 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
 }
@@ -4584,8 +4584,8 @@ void
 tx_vlan_reset(portid_t port_id)
 {
 	ports[port_id].dev_conf.txmode.offloads &=
-				~(DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_QINQ_INSERT);
+				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = 0;
 	ports[port_id].tx_vlan_id_outer = 0;
 }
@@ -4991,7 +4991,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
 	ret = eth_link_get_nowait_print_err(port_id, &link);
 	if (ret < 0)
 		return 1;
-	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
 	    rate > link.link_speed) {
 		fprintf(stderr,
 			"Invalid rate value:%u bigger than link speed: %u\n",
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 090797318a35..75b24487e72e 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
-			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 				ol_flags |= PKT_TX_IP_CKSUM;
 			} else {
 				ipv4_hdr->hdr_checksum = 0;
@@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
-			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 				ol_flags |= PKT_TX_UDP_CKSUM;
 			} else {
 				udp_hdr->dgram_cksum = 0;
@@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		if (tso_segsz)
 			ol_flags |= PKT_TX_TCP_SEG;
-		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 			ol_flags |= PKT_TX_TCP_CKSUM;
 		} else {
 			tcp_hdr->cksum = 0;
@@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 			((char *)l3_hdr + info->l3_len);
 		/* sctp payload must be a multiple of 4 to be
 		 * offloaded */
-		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
 			((ipv4_hdr->total_length & 0x3) == 0)) {
 			ol_flags |= PKT_TX_SCTP_CKSUM;
 		} else {
@@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ipv4_hdr->hdr_checksum = 0;
 		ol_flags |= PKT_TX_OUTER_IPV4;
 
-		if (tx_offloads	& DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
 		else
 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ol_flags |= PKT_TX_TCP_SEG;
 
 	/* Skip SW outer UDP checksum generation if HW supports it */
-	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
 		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
 			udp_hdr->dgram_cksum
 				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				m->outer_l2_len = info.outer_l2_len;
 				m->outer_l3_len = info.outer_l3_len;
 				m->l2_len = info.l2_len;
@@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					rte_be_to_cpu_16(info.outer_ethertype),
 					info.outer_l3_len);
 			/* dump tx packet info */
-			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					    DEV_TX_OFFLOAD_UDP_CKSUM |
-					    DEV_TX_OFFLOAD_TCP_CKSUM |
-					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
 				info.tso_segsz != 0)
 				printf("tx: m->l2_len=%d m->l3_len=%d "
 					"m->l4_len=%d\n",
 					m->l2_len, m->l3_len, m->l4_len);
 			if (info.is_tunnel == 1) {
 				if ((tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 				    (tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
 				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
 					printf("tx: m->outer_l2_len=%d "
 						"m->outer_l3_len=%d\n",
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 7ebed9fed334..03d026dec169 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -99,11 +99,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
 
 	tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags |= PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads	& DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index ee76df7f0323..57e00bca20e7 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 	fs->rx_packets += nb_rx;
 	txp = &ports[fs->tx_port];
 	tx_offloads = txp->dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
diff --git a/app/test-pmd/macswap_common.h b/app/test-pmd/macswap_common.h
index 7e9a3590a436..7ade9a686b7c 100644
--- a/app/test-pmd/macswap_common.h
+++ b/app/test-pmd/macswap_common.h
@@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
 {
 	uint64_t ol_flags = 0;
 
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
 			PKT_TX_VLAN : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
 			PKT_TX_QINQ : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
 			PKT_TX_MACSEC : 0;
 
 	return ol_flags;
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index ab8e8f7e694a..693e77eff2c0 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -546,29 +546,29 @@ parse_xstats_list(const char *in_str, struct rte_eth_xstat_name **xstats,
 static int
 parse_link_speed(int n)
 {
-	uint32_t speed = ETH_LINK_SPEED_FIXED;
+	uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
 
 	switch (n) {
 	case 1000:
-		speed |= ETH_LINK_SPEED_1G;
+		speed |= RTE_ETH_LINK_SPEED_1G;
 		break;
 	case 10000:
-		speed |= ETH_LINK_SPEED_10G;
+		speed |= RTE_ETH_LINK_SPEED_10G;
 		break;
 	case 25000:
-		speed |= ETH_LINK_SPEED_25G;
+		speed |= RTE_ETH_LINK_SPEED_25G;
 		break;
 	case 40000:
-		speed |= ETH_LINK_SPEED_40G;
+		speed |= RTE_ETH_LINK_SPEED_40G;
 		break;
 	case 50000:
-		speed |= ETH_LINK_SPEED_50G;
+		speed |= RTE_ETH_LINK_SPEED_50G;
 		break;
 	case 100000:
-		speed |= ETH_LINK_SPEED_100G;
+		speed |= RTE_ETH_LINK_SPEED_100G;
 		break;
 	case 200000:
-		speed |= ETH_LINK_SPEED_200G;
+		speed |= RTE_ETH_LINK_SPEED_200G;
 		break;
 	case 100:
 	case 10:
@@ -1000,13 +1000,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
 				if (!strcmp(optarg, "64K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_64K;
+						RTE_ETH_FDIR_PBALLOC_64K;
 				else if (!strcmp(optarg, "128K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_128K;
+						RTE_ETH_FDIR_PBALLOC_128K;
 				else if (!strcmp(optarg, "256K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_256K;
+						RTE_ETH_FDIR_PBALLOC_256K;
 				else
 					rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
 						 " must be: 64K or 128K or 256K\n",
@@ -1048,34 +1048,34 @@ launch_args_parse(int argc, char** argv)
 			}
 #endif
 			if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 			if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
-				rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 			if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
-				rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 			if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
-				rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-rx-timestamp"))
-				rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 			if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-filter"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-extend"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-qinq-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
 				rx_drop_en = 1;
@@ -1097,13 +1097,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
 				set_pkt_forwarding_mode(optarg);
 			if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
-				rss_hf = ETH_RSS_IP;
+				rss_hf = RTE_ETH_RSS_IP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
-				rss_hf = ETH_RSS_UDP;
+				rss_hf = RTE_ETH_RSS_UDP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
-				rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
-				rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rxq")) {
 				n = atoi(optarg);
 				if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@@ -1482,12 +1482,12 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
 				char *end = NULL;
 				n = strtoul(optarg, &end, 16);
-				if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+				if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
 					rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
 				else
 					rte_exit(EXIT_FAILURE,
 						 "rx-mq-mode must be >= 0 and <= %d\n",
-						 ETH_MQ_RX_VMDQ_DCB_RSS);
+						 RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
 			}
 			if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
 				record_core_cycles = 1;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index af0e79fe6d51..bf2420db0da6 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -348,7 +348,7 @@ uint64_t noisy_lkup_num_reads_writes;
 /*
  * Receive Side Scaling (RSS) configuration.
  */
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
 
 /*
  * Port topology configuration
@@ -459,12 +459,12 @@ lcoreid_t latencystats_lcore_id = -1;
 struct rte_eth_rxmode rx_mode;
 
 struct rte_eth_txmode tx_mode = {
-	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 };
 
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
 	.mode = RTE_FDIR_MODE_NONE,
-	.pballoc = RTE_FDIR_PBALLOC_64K,
+	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 	.status = RTE_FDIR_REPORT_STATUS,
 	.mask = {
 		.vlan_tci_mask = 0xFFEF,
@@ -518,7 +518,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 /*
  * hexadecimal bitmask of RX mq mode can be enabled.
  */
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
 
 /*
  * Used to set forced link speed
@@ -1572,9 +1572,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
 	if (ret != 0)
 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
 
-	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		port->dev_conf.txmode.offloads &=
-			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Apply Rx offloads configuration */
 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
@@ -1711,8 +1711,8 @@ init_config(void)
 
 	init_port_config();
 
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
 	/*
 	 * Records which Mbuf pool to use by each logical core, if needed.
 	 */
@@ -3457,7 +3457,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3751,17 +3751,17 @@ init_port_config(void)
 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
 				port->dev_conf.rxmode.mq_mode =
 					(enum rte_eth_rx_mq_mode)
-						(rx_mq_mode & ETH_MQ_RX_RSS);
+						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
 			} else {
-				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 				port->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 				for (i = 0;
 				     i < port->dev_info.nb_rx_queues;
 				     i++)
 					port->rx_conf[i].offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 			}
 		}
 
@@ -3849,9 +3849,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		vmdq_rx_conf->enable_default_pool = 0;
 		vmdq_rx_conf->default_pool = 0;
 		vmdq_rx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 		vmdq_tx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 
 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3859,7 +3859,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 			vmdq_rx_conf->pool_map[i].pools =
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
@@ -3867,8 +3867,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3884,23 +3884,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
 	}
 
 	if (pfc_en)
 		eth_conf->dcb_capability_en =
-				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
 	else
-		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
 
 	return 0;
 }
@@ -3929,7 +3929,7 @@ init_port_dcb_config(portid_t pid,
 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	/* re-configure the device . */
 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@@ -3979,7 +3979,7 @@ init_port_dcb_config(portid_t pid,
 
 	rxtx_port_config(rte_port);
 	/* VLAN filter */
-	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index e3995d24ab53..ccd025d5e0f5 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -491,7 +491,7 @@ extern lcoreid_t bitrate_lcore_id;
 extern uint8_t bitrate_enabled;
 #endif
 
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
 
 extern uint32_t max_rx_pkt_len;
 
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e45f8840c91c..9eb7992815e8 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -354,11 +354,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	tx_offloads = txp->dev_conf.txmode.offloads;
 	vlan_tci = txp->tx_vlan_id;
 	vlan_tci_outer = txp->tx_vlan_id_outer;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	/*
diff --git a/app/test/test_ethdev_link.c b/app/test/test_ethdev_link.c
index ee11987bae28..6248aea49abd 100644
--- a/app/test/test_ethdev_link.c
+++ b/app/test/test_ethdev_link.c
@@ -14,10 +14,10 @@ test_link_status_up_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -27,9 +27,9 @@ test_link_status_up_default(void)
 	TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
 		text, strlen(text), "Invalid default link status string");
 
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_FIXED;
-	link_status.link_speed = ETH_SPEED_NUM_10M,
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #2: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -37,7 +37,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -45,7 +45,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_NONE;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -54,9 +54,9 @@ test_link_status_up_default(void)
 		"string with HDX");
 
 	/* test max str len */
-	link_status.link_speed = ETH_SPEED_NUM_200G;
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_AUTONEG;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #4:len = %d, %s\n", ret, text);
 	RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@@ -69,10 +69,10 @@ test_link_status_down_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -90,9 +90,9 @@ test_link_status_invalid(void)
 	int ret = 0;
 	struct rte_eth_link link_status = {
 		.link_speed = 55555,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -116,21 +116,21 @@ test_link_speed_all_values(void)
 		const char *value;
 		uint32_t link_speed;
 	} speed_str_map[] = {
-		{ "None",   ETH_SPEED_NUM_NONE },
-		{ "10 Mbps",  ETH_SPEED_NUM_10M },
-		{ "100 Mbps", ETH_SPEED_NUM_100M },
-		{ "1 Gbps",   ETH_SPEED_NUM_1G },
-		{ "2.5 Gbps", ETH_SPEED_NUM_2_5G },
-		{ "5 Gbps",   ETH_SPEED_NUM_5G },
-		{ "10 Gbps",  ETH_SPEED_NUM_10G },
-		{ "20 Gbps",  ETH_SPEED_NUM_20G },
-		{ "25 Gbps",  ETH_SPEED_NUM_25G },
-		{ "40 Gbps",  ETH_SPEED_NUM_40G },
-		{ "50 Gbps",  ETH_SPEED_NUM_50G },
-		{ "56 Gbps",  ETH_SPEED_NUM_56G },
-		{ "100 Gbps", ETH_SPEED_NUM_100G },
-		{ "200 Gbps", ETH_SPEED_NUM_200G },
-		{ "Unknown",  ETH_SPEED_NUM_UNKNOWN },
+		{ "None",   RTE_ETH_SPEED_NUM_NONE },
+		{ "10 Mbps",  RTE_ETH_SPEED_NUM_10M },
+		{ "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+		{ "1 Gbps",   RTE_ETH_SPEED_NUM_1G },
+		{ "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+		{ "5 Gbps",   RTE_ETH_SPEED_NUM_5G },
+		{ "10 Gbps",  RTE_ETH_SPEED_NUM_10G },
+		{ "20 Gbps",  RTE_ETH_SPEED_NUM_20G },
+		{ "25 Gbps",  RTE_ETH_SPEED_NUM_25G },
+		{ "40 Gbps",  RTE_ETH_SPEED_NUM_40G },
+		{ "50 Gbps",  RTE_ETH_SPEED_NUM_50G },
+		{ "56 Gbps",  RTE_ETH_SPEED_NUM_56G },
+		{ "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+		{ "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+		{ "Unknown",  RTE_ETH_SPEED_NUM_UNKNOWN },
 		{ "Invalid",   50505 }
 	};
 
diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index add4d8a67821..a09253e91814 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -103,7 +103,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 		.intr_conf = {
 			.rxq = 1,
@@ -118,7 +118,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 	};
 
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index 96733554b6c4..40ab0d5c4ca4 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
 
 static const struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 5388d18125a6..8a9ef851789f 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -134,11 +134,11 @@ static uint16_t vlan_id = 0x100;
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 189d2430f27e..351129de2f9b 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -107,11 +107,11 @@ static struct link_bonding_unittest_params test_params  = {
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index e7bb0497b663..f9eae9397386 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -52,7 +52,7 @@ struct slave_conf {
 
 	struct rte_eth_rss_conf rss_conf;
 	uint8_t rss_key[40];
-	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t is_slave;
 	struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
@@ -61,7 +61,7 @@ struct slave_conf {
 struct link_bonding_rssconf_unittest_params {
 	uint8_t bond_port_id;
 	struct rte_eth_dev_info bond_dev_info;
-	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 	struct slave_conf slave_ports[SLAVE_COUNT];
 
 	struct rte_mempool *mbuf_pool;
@@ -80,27 +80,27 @@ static struct link_bonding_rssconf_unittest_params test_params  = {
  */
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
 static struct rte_eth_conf rss_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IPV6,
+			.rss_hf = RTE_ETH_RSS_IPV6,
 		},
 	},
 	.lpbk_mode = 0,
@@ -207,13 +207,13 @@ bond_slaves(void)
 static int
 reta_set(uint16_t port_id, uint8_t value, int reta_size)
 {
-	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
 	int i, j;
 
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
 		/* select all fields to set */
 		reta_conf[i].mask = ~0LL;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			reta_conf[i].reta[j] = value;
 	}
 
@@ -232,8 +232,8 @@ reta_check_synced(struct slave_conf *port)
 	for (i = 0; i < test_params.bond_dev_info.reta_size;
 			i++) {
 
-		int index = i / RTE_RETA_GROUP_SIZE;
-		int shift = i % RTE_RETA_GROUP_SIZE;
+		int index = i / RTE_ETH_RETA_GROUP_SIZE;
+		int shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (port->reta_conf[index].reta[shift] !=
 				test_params.bond_reta_conf[index].reta[shift])
@@ -251,7 +251,7 @@ static int
 bond_reta_fetch(void) {
 	unsigned j;
 
-	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
+	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
 			j++)
 		test_params.bond_reta_conf[j].mask = ~0LL;
 
@@ -268,7 +268,7 @@ static int
 slave_reta_fetch(struct slave_conf *port) {
 	unsigned j;
 
-	for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
+	for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
 		port->reta_conf[j].mask = ~0LL;
 
 	TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index a3b4f52c65e6..1df86ce080e5 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -62,11 +62,11 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 1,  /* enable loopback */
 };
@@ -155,7 +155,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -822,7 +822,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		/* bulk alloc rx, full-featured tx */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "hybrid")) {
 		/* bulk alloc rx, vector tx
@@ -831,13 +831,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		 */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "full")) {
 		/* full feature rx,tx pair */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		return 0;
 	}
 
diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7e15b47eb0fb..d9f2e4f66bde 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -53,7 +53,7 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
 		rte_pktmbuf_free(pkt);
@@ -168,7 +168,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
 		int wait_to_complete __rte_unused)
 {
 	if (!bonded_eth_dev->data->dev_started)
-		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -562,9 +562,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 	eth_dev->data->nb_rx_queues = (uint16_t)1;
 	eth_dev->data->nb_tx_queues = (uint16_t)1;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL)
diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 53560d3830d7..1c0ea988f239 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue configuration.
 - HW managed event vectorization on CN10K for packets enqueued from ethdev to
diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index 11fbebfcd243..0fa57abfa3e0 100644
--- a/doc/guides/eventdevs/octeontx2.rst
+++ b/doc/guides/eventdevs/octeontx2.rst
@@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue config.
 
diff --git a/doc/guides/nics/af_packet.rst b/doc/guides/nics/af_packet.rst
index bdd6e7263c85..54feffdef4bd 100644
--- a/doc/guides/nics/af_packet.rst
+++ b/doc/guides/nics/af_packet.rst
@@ -70,5 +70,5 @@ Features and Limitations
 ------------------------
 
 The PMD will re-insert the VLAN tag transparently to the packet if the kernel
+strips it, as long as ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
+strips it, as long as the ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
 application.
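
For illustration (not part of this patch), a minimal sketch of an application
leaving VLAN stripping disabled so this re-insertion behaviour applies; all
other configuration fields are assumed defaults:

    #include <rte_ethdev.h>

    /* Illustrative: RTE_ETH_RX_OFFLOAD_VLAN_STRIP is deliberately not set,
     * so the af_packet PMD re-inserts any tag stripped by the kernel. */
    static const struct rte_eth_conf port_conf = {
        .rxmode = {
            .offloads = 0,
        },
    };
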
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index aa6032889a55..b3d10f30dc77 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -877,21 +877,21 @@ processing. This improved performance is derived from a number of optimizations:
     * TX: only the following reduced set of transmit offloads is supported in
       vector mode::
 
-       DEV_TX_OFFLOAD_MBUF_FAST_FREE
+       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 
     * RX: only the following reduced set of receive offloads is supported in
       vector mode (note that jumbo MTU is allowed only when the MTU setting
-      does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
-       DEV_RX_OFFLOAD_VLAN_STRIP
-       DEV_RX_OFFLOAD_KEEP_CRC
-       DEV_RX_OFFLOAD_IPV4_CKSUM
-       DEV_RX_OFFLOAD_UDP_CKSUM
-       DEV_RX_OFFLOAD_TCP_CKSUM
-       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-       DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-       DEV_RX_OFFLOAD_RSS_HASH
-       DEV_RX_OFFLOAD_VLAN_FILTER
+      does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+       RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+       RTE_ETH_RX_OFFLOAD_KEEP_CRC
+       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_RSS_HASH
+       RTE_ETH_RX_OFFLOAD_VLAN_FILTER
 
 The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
 vector processing is made at run-time when the port is started; if no transmit
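
For illustration (not part of this patch), a hedged sketch of a port
configuration that stays within the reduced offload sets above, so the vector
Rx/Tx paths may be selected at port start:

    #include <rte_ethdev.h>

    /* Illustrative values only: request nothing beyond the reduced sets. */
    static const struct rte_eth_conf vec_port_conf = {
        .rxmode = {
            .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
                        RTE_ETH_RX_OFFLOAD_RSS_HASH,
        },
        .txmode = {
            .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
        },
    };
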
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 91bdcd065a95..0209730b904a 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -432,7 +432,7 @@ Limitations
 .. code-block:: console
 
      vlan_offload = rte_eth_dev_get_vlan_offload(port);
-     vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+     vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
 Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index 8dd421ca013b..b48d9dcb9591 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -30,7 +30,7 @@ Speed capabilities
 
 Supports getting the speed capabilities that the current device is capable of.
 
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
 * **[related]  API**: ``rte_eth_dev_info_get()``.
 
 
@@ -101,11 +101,11 @@ Supports Rx interrupts.
 Lock-free Tx queue
 ------------------
 
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
 invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
 
-* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
 * **[related]  API**: ``rte_eth_tx_burst()``.
 
 
@@ -117,8 +117,8 @@ Fast mbuf free
 Supports optimization for fast release of mbufs following successful Tx.
 Requires that, per queue, all mbufs come from the same mempool and have refcnt = 1.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
 
 
 .. _nic_features_free_tx_mbuf_on_demand:
@@ -177,7 +177,7 @@ Scattered Rx
 
 Supports receiving segmented mbufs.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
 * **[implements] datapath**: ``Scattered Rx function``.
 * **[implements] rte_eth_dev_data**: ``scattered_rx``.
 * **[provides]   eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -205,12 +205,12 @@ LRO
 
 Supports Large Receive Offload.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
   ``dev_conf.rxmode.max_lro_pkt_size``.
 * **[implements] datapath**: ``LRO functionality``.
 * **[implements] rte_eth_dev_data**: ``lro``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
 * **[provides]   rte_eth_dev_info**: ``max_lro_pkt_size``.
 
 
@@ -221,12 +221,12 @@ TSO
 
 Supports TCP Segmentation Offloading.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
 * **[uses]       rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
 * **[uses]       mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
 * **[implements] datapath**: ``TSO functionality``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
 
 
 .. _nic_features_promiscuous_mode:
@@ -287,9 +287,9 @@ RSS hash
 
 Supports RSS hashing on RX.
 
-* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
 * **[uses]     user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
@@ -302,7 +302,7 @@ Inner RSS
 Supports RX RSS hashing on Inner headers.
 
 * **[uses]    rte_flow_action_rss**: ``level``.
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
 
@@ -339,7 +339,7 @@ VMDq
 
 Supports Virtual Machine Device Queues (VMDq).
 
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
 * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -362,7 +362,7 @@ DCB
 
 Supports Data Center Bridging (DCB).
 
-* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
 * **[uses]       user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -378,7 +378,7 @@ VLAN filter
 
 Supports filtering of a VLAN Tag identifier.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
 * **[implements] eth_dev_ops**: ``vlan_filter_set``.
 * **[related]    API**: ``rte_eth_dev_vlan_filter()``.
 
@@ -416,13 +416,13 @@ Supports inline crypto processing defined by rte_security library to perform cry
 operations of a security protocol while the packet is received in the NIC. The NIC is
 not aware of the protocol operations. See the Security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@@ -438,14 +438,14 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
 packet is received at the NIC. The NIC is capable of understanding the security
 protocol operations. See the security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
   ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@@ -459,7 +459,7 @@ CRC offload
 Supports CRC stripping by hardware.
 A PMD is assumed to support CRC stripping by default. A PMD should advertise whether it supports keeping the CRC.
 
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
 
 
 .. _nic_features_vlan_offload:
@@ -469,13 +469,13 @@ VLAN offload
 
 Supports VLAN offload to hardware.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
 * **[implements] eth_dev_ops**: ``vlan_offload_set``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[related]    API**: ``rte_eth_dev_set_vlan_offload()``,
   ``rte_eth_dev_get_vlan_offload()``.
 
@@ -487,14 +487,14 @@ QinQ offload
 
 Supports QinQ (queue in queue) offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
   ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
   ``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 
 
 .. _nic_features_fec:
@@ -508,7 +508,7 @@ information to correct the bit errors generated during data packet transmission
 improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
 
 * **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides]   rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides]   rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
 * **[related]    API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
 
 
@@ -519,16 +519,16 @@ L3 checksum offload
 
 Supports L3 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
 * **[uses]     mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
   ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
   ``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 
 
 .. _nic_features_l4_checksum_offload:
@@ -538,8 +538,8 @@ L4 checksum offload
 
 Supports L4 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
   ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -547,8 +547,8 @@ Supports L4 checksum offload.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
   ``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 
 .. _nic_features_hw_timestamp:
 
@@ -557,10 +557,10 @@ Timestamp offload
 
 Supports Timestamp.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[related] eth_dev_ops**: ``read_clock``.
 
 .. _nic_features_macsec_offload:
@@ -570,11 +570,11 @@ MACsec offload
 
 Supports MACsec.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 
 
 .. _nic_features_inner_l3_checksum:
@@ -584,16 +584,16 @@ Inner L3 checksum
 
 Supports inner packet L3 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 
 
 .. _nic_features_inner_l4_checksum:
@@ -603,15 +603,15 @@ Inner L4 checksum
 
 Supports inner packet L4 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
   ``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 
 
 .. _nic_features_packet_type_parsing:
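
The recurring pattern behind the ``[uses]``/``[provides]`` entries above is
that an application probes the advertised capabilities before requesting an
offload. A minimal sketch of that pattern with the renamed flags (hypothetical
helper, not part of this patch):

    #include <rte_ethdev.h>

    static int
    enable_rx_cksum_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
    {
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;

        /* Request the offload only when the PMD advertises it. */
        if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM)
            conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;

        return 0;
    }
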
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index ed6afd62703d..bba53f5a64ee 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
 To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
 will be checked:
 
-*   ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+*   ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
 
-*   ``DEV_RX_OFFLOAD_CHECKSUM``
+*   ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
 
-*   ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+*   ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
 
 *   ``fdir_conf->mode``
 
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 2efdd1a41bb4..a1e236ad75e5 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -216,21 +216,21 @@ For example,
     *   If the max number of VFs (max_vfs) is set in the range of 1 to 32:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
 
         If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
 
     *   If the max number of VFs (max_vfs) is in the range of 33 to 64:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected,
         as ``rxq`` is not correct in this case;
 
-        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are totally 64 pools (RTE_ETH_64_POOLS),
         and each VF has 2 Rx queues;
 
-    On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
-    or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+    On the host, to enable VF RSS functionality, the Rx mq mode should be set to RTE_ETH_MQ_RX_VMDQ_RSS
+    or RTE_ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
     The VF RSS information, such as the hash function, RSS key and RSS key length, also needs to be configured.
 
 .. note::
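
For illustration (not part of this patch), a sketch of a matching host-side
configuration; the hash types are an assumption and the RSS key is left at
the driver default:

    #include <rte_ethdev.h>

    static const struct rte_eth_conf vf_rss_port_conf = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS, /* RSS-capable mq mode */
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL,          /* keep the default RSS key */
                .rss_hf = RTE_ETH_RSS_IP, /* assumed hash functions */
            },
        },
    };
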
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 20a74b9b5bcd..148d2f5fc2be 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -89,13 +89,13 @@ Other features are supported using optional MACRO configuration. They include:
 
 To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
 
-*   DEV_RX_OFFLOAD_VLAN_STRIP
+*   RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 
-*   DEV_RX_OFFLOAD_VLAN_EXTEND
+*   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
 
-*   DEV_RX_OFFLOAD_CHECKSUM
+*   RTE_ETH_RX_OFFLOAD_CHECKSUM
 
-*   DEV_RX_OFFLOAD_HEADER_SPLIT
+*   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
 
 *   dev_conf
 
@@ -163,13 +163,13 @@ l3fwd
 ~~~~~
 
 When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
 Otherwise, by default, RX vPMD is disabled.
 
 load_balancer
 ~~~~~~~~~~~~~
 
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
 In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
 
 
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index e4f58c899031..cc1726207f6c 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -371,7 +371,7 @@ Limitations
 
 - CRC:
 
-  - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+  - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
@@ -607,7 +607,7 @@ Driver options
   small-packet traffic.
 
   When MPRQ is enabled, MTU can be larger than the size of
-  user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+  user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. The PMD will
   configure a stride size large enough to accommodate the MTU as long as the
   device allows. Note that this can waste system memory compared to enabling Rx
   scatter and multi-segment packet.
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 3ce696b605d1..681010d9ed7d 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -275,7 +275,7 @@ An example utility for eBPF instruction generation in the format of C arrays wil
 be added in a future release.
 
 TAP reports on supported RSS functions as part of dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
 **Known limitation:** TAP supports all of the above hash functions together
 and not in partial combinations.
 
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 7bff0aef0b74..9b2c31a2f0bc 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
 
    - the bit mask of required GSO types. The GSO library uses the same macros as
      those that describe a physical device's TX offloading capabilities (i.e.
-     ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+     ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
      wants to segment TCP/IPv4 packets, it should set gso_types to
-     ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
-     supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
-     ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+     ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+     supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+     ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
      allowed.
 
    - a flag, that indicates whether the IPv4 headers of output segments should
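
As a hedged sketch of the context setup described in the list above (the pool
handles and the segment size are placeholders; field names assume the
``rte_gso_ctx`` layout from ``rte_gso.h``):

    #include <rte_gso.h>
    #include <rte_mbuf.h>

    static void
    gso_ctx_init(struct rte_gso_ctx *ctx, struct rte_mempool *direct_pool,
                 struct rte_mempool *indirect_pool)
    {
        ctx->direct_pool = direct_pool;      /* headers of output segments */
        ctx->indirect_pool = indirect_pool;  /* indirect mbufs to payload */
        ctx->gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO; /* TCP/IPv4 only */
        ctx->gso_size = 1400;                /* max payload per segment */
        ctx->flag = RTE_GSO_FLAG_IPID_FIXED; /* fixed IPv4 ID in segments */
    }
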
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 2f190b40e43a..dc6186a44ae2 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
     mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM
     set out_ip checksum to 0 in the packet
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
 
 - calculate checksum of out_ip and out_udp::
 
@@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
     set out_ip checksum to 0 in the packet
     set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
-  and DEV_TX_OFFLOAD_UDP_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+  and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
 
 - calculate checksum of in_ip::
 
@@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
 
   This is similar to case 1), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of in_ip and in_tcp::
@@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
   This is similar to case 2), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
-  DEV_TX_OFFLOAD_TCP_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+  RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - segment inner TCP::
@@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header without including the IP
       payload length using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of out_ip, in_ip, in_tcp::
@@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
-  DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+  RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
 
 The list of flags and their precise meaning is described in the mbuf API
 documentation (rte_mbuf.h). Also refer to the testpmd source code
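
For reference, a sketch of the last case above expressed in mbuf fields
(header lengths are illustrative; real code derives them from the packet):

    #include <rte_mbuf.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_udp.h>
    #include <rte_vxlan.h>

    static void
    request_vxlan_inner_tcp_cksum(struct rte_mbuf *m)
    {
        m->outer_l2_len = sizeof(struct rte_ether_hdr);
        m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
        m->l2_len = sizeof(struct rte_udp_hdr) +
                    sizeof(struct rte_vxlan_hdr) +
                    sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
                       PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
        /* The application still zeroes both IP checksums in the packet and
         * seeds the inner TCP checksum with rte_ipv4_phdr_cksum(). */
    }
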
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 0d4ac77a7ccf..68312898448c 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
 
 Avoiding lock contention is a key issue in a multi-core environment.
 To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
 In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
 
 To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
 
 Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
 
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
 concurrently on the same tx queue without SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
 
 *  Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
@@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
 *  In the eventdev use case, avoid dedicating a separate TX core for transmitting and thus
    enable more scaling as all workers can send the packets.
 
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
 
 Device Identification, Ownership and Configuration
 --------------------------------------------------
@@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
 The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
 Any offloading requested by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
 ``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
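
A minimal sketch of that flow, with an illustrative offload set::

    struct rte_eth_conf port_conf = { 0 };
    struct rte_eth_dev_info dev_info;
    uint64_t wanted = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                      RTE_ETH_TX_OFFLOAD_TCP_CKSUM;

    rte_eth_dev_info_get(port_id, &dev_info);
    /* Only request offloads the device actually advertises. */
    port_conf.txmode.offloads = wanted & dev_info.tx_offload_capa;
    rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
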
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index aeba3741825e..063ff388476a 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1968,23 +1968,23 @@ only matching traffic goes through.
 
 .. table:: RSS
 
-   +---------------+---------------------------------------------+
-   | Field         | Value                                       |
-   +===============+=============================================+
-   | ``func``      | RSS hash function to apply                  |
-   +---------------+---------------------------------------------+
-   | ``level``     | encapsulation level for ``types``           |
-   +---------------+---------------------------------------------+
-   | ``types``     | specific RSS hash types (see ``ETH_RSS_*``) |
-   +---------------+---------------------------------------------+
-   | ``key_len``   | hash key length in bytes                    |
-   +---------------+---------------------------------------------+
-   | ``queue_num`` | number of entries in ``queue``              |
-   +---------------+---------------------------------------------+
-   | ``key``       | hash key                                    |
-   +---------------+---------------------------------------------+
-   | ``queue``     | queue indices to use                        |
-   +---------------+---------------------------------------------+
+   +---------------+-------------------------------------------------+
+   | Field         | Value                                           |
+   +===============+=================================================+
+   | ``func``      | RSS hash function to apply                      |
+   +---------------+-------------------------------------------------+
+   | ``level``     | encapsulation level for ``types``               |
+   +---------------+-------------------------------------------------+
+   | ``types``     | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+   +---------------+-------------------------------------------------+
+   | ``key_len``   | hash key length in bytes                        |
+   +---------------+-------------------------------------------------+
+   | ``queue_num`` | number of entries in ``queue``                  |
+   +---------------+-------------------------------------------------+
+   | ``key``       | hash key                                        |
+   +---------------+-------------------------------------------------+
+   | ``queue``     | queue indices to use                            |
+   +---------------+-------------------------------------------------+
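
A sketch of populating these fields for a flow rule (the queue list and
hash types here are illustrative)::

    static const uint16_t queues[] = { 0, 1, 2, 3 };
    struct rte_flow_action_rss rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0, /* hash on the outermost encapsulation */
        .types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        .key_len = 0, /* keep the device default key */
        .queue_num = RTE_DIM(queues),
        .key = NULL,
        .queue = queues,
    };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };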
 
 Action: ``PF``
 ^^^^^^^^^^^^^^
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index ad92c16868c1..46c9b51d1bf9 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -569,7 +569,7 @@ created by the application is attached to the security session by the API
 
 For Inline Crypto and Inline protocol offload, device specific defined metadata is
 updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
 
 For inline protocol offloaded ingress traffic, the application can register a
 pointer, ``userdata``, in the security session. When the packet is received,
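
A sketch of the transmit-side metadata step described above (``sec_ctx``
from ``rte_eth_dev_get_sec_ctx()`` and ``sec_sess`` from
``rte_security_session_create()`` are assumed; the capability flag is the
one named above)::

    if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA)
        rte_security_set_pkt_metadata(sec_ctx, sec_sess, m, NULL);
    rte_eth_tx_burst(port_id, queue_id, &m, 1);
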
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 0b4d03fb961f..199c3fa0bd70 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -58,22 +58,16 @@ Deprecation Notices
   ``RTE_ETH_FLOW_MAX`` is one sample of the mentioned case, adding a new flow
   type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
  usage in the following public struct hierarchy:
-  ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+  ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
  Such usages need to be identified and fixed in 20.11; otherwise, they block
  us from extending the existing enums/defines.
  One solution can be to use a fixed-size array instead of a ``.*MAX.*`` value.
 
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
-  Macros will be added for backward compatibility.
-  Backward compatibility macros will be removed on v22.11.
-  A few old backward compatibility macros from 2013 that does not have
-  proper prefix will be removed on v21.11.
-
 * ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
   and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
   will be removed in DPDK 20.11.
 
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: A new offload flag, ``RTE_ETH_RX_OFFLOAD_FLOW_MARK``, will be added in 19.11.
  This will allow an application to enable or disable PMD updates to
   ``rte_mbuf::hash::fdir``.
   This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 041383ee2a73..707352099b13 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -368,6 +368,9 @@ ABI Changes
  to be transparent for both users (no changes in the user app are required) and
  PMD developers (no changes in PMDs are required).
 
+* ethdev: All enums and macros were updated to have the ``RTE_ETH`` prefix and
+  structures the ``rte_eth`` prefix; DPDK components were updated to use the new names.
+
 
 Known Issues
 ------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 78171b25f96e..782574dd39d5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -209,12 +209,12 @@ Where:
     device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
 
 *   ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
    allows the user to disable some of the RX HW offload capabilities.
     By default all HW RX offloads are enabled.
 
 *   ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
    allows the user to disable some of the TX HW offload capabilities.
     By default all HW TX offloads are enabled.
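
As an illustrative way to derive such a mask without hardcoding bit
values, the flag names can be OR-ed and printed::

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    int main(void)
    {
        uint64_t rx_mask = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                           RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                           RTE_ETH_RX_OFFLOAD_TCP_CKSUM;

        printf("--rxoffload 0x%" PRIx64 "\n", rx_mask);
        return 0;
    }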
 
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 8ff7ab85369c..2e1446ee461b 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -537,7 +537,7 @@ The command line options are:
     Set the hexadecimal bitmask of the RX multi-queue modes which can be enabled.
     The default value is 0x7::
 
-       ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+       RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
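
    Since the default of 0x7 is the bitwise OR of these three flags, each
    flag occupies its own bit; for example, ``--rx-mq-mode 0x1`` would
    restrict testpmd to RSS only (assuming ``RTE_ETH_MQ_RX_RSS_FLAG`` is
    the 0x1 bit, as the default value implies).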
 
 *   ``--record-core-cycles``
 
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
index be52e6f72dab..a922988607ef 100644
--- a/drivers/bus/dpaa/include/process.h
+++ b/drivers/bus/dpaa/include/process.h
@@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
 struct usdpaa_ioctl_link_status_args_old {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
-	/* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+	/* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
 	int     link_autoneg;
 
 };
@@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
 struct usdpaa_ioctl_update_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_update_link_speed {
 	/* network device node name*/
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
 };
 
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index 10d1ac82a4bd..21883f6b3f66 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -160,7 +160,7 @@ enum roc_npc_rss_hash_function {
 struct roc_npc_action_rss {
 	enum roc_npc_rss_hash_function func;
 	uint32_t level;
-	uint64_t types;	       /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types;	       /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len;      /**< Hash key length in bytes. */
 	uint32_t queue_num;    /**< Number of entries in @p queue. */
 	const uint8_t *key;    /**< Hash key. */
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index a077376dc0fb..8f778f0c2419 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -93,10 +93,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@@ -290,7 +290,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -320,7 +320,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		internals->tx_queue[i].sockfd = -1;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -331,7 +331,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
 	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	return 0;
 }
 
@@ -346,9 +346,9 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
 	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index b362ccdcd38c..e156246f24df 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -163,10 +163,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -652,7 +652,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -661,7 +661,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 377299b14c7a..b618cba3f023 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
 		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
 
 	/* ARK PMD supports all line rates, how do we indicate that here ?? */
-	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
-				ETH_LINK_SPEED_10G |
-				ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G);
-
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+	dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+				RTE_ETH_LINK_SPEED_10G |
+				RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G);
+
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return 0;
 }
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 5a198f53fce7..f7bfac796c07 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -154,20 +154,20 @@ static struct rte_pci_driver rte_atl_pmd = {
 	.remove = eth_atl_pci_remove,
 };
 
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
-			| DEV_RX_OFFLOAD_IPV4_CKSUM \
-			| DEV_RX_OFFLOAD_UDP_CKSUM \
-			| DEV_RX_OFFLOAD_TCP_CKSUM \
-			| DEV_RX_OFFLOAD_MACSEC_STRIP \
-			| DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
-			| DEV_TX_OFFLOAD_IPV4_CKSUM \
-			| DEV_TX_OFFLOAD_UDP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_TSO \
-			| DEV_TX_OFFLOAD_MACSEC_INSERT \
-			| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
+			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define SFP_EEPROM_SIZE 0x100
 
@@ -488,7 +488,7 @@ atl_dev_start(struct rte_eth_dev *dev)
 	/* set adapter started */
 	hw->adapter_stopped = 0;
 
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -655,18 +655,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
 	uint32_t speed_mask = 0;
 
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
 	} else {
-		if (link_speeds & ETH_LINK_SPEED_10G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed_mask |= AQ_NIC_RATE_10G;
-		if (link_speeds & ETH_LINK_SPEED_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed_mask |= AQ_NIC_RATE_5G;
-		if (link_speeds & ETH_LINK_SPEED_1G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed_mask |= AQ_NIC_RATE_1G;
-		if (link_speeds & ETH_LINK_SPEED_2_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed_mask |=  AQ_NIC_RATE_2G5;
-		if (link_speeds & ETH_LINK_SPEED_100M)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed_mask |= AQ_NIC_RATE_100M;
 	}
 
@@ -1127,10 +1127,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
-	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 
 	return 0;
 }
@@ -1175,10 +1175,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 	u32 fc = AQ_NIC_FC_OFF;
 	int err = 0;
 
-	link.link_status = ETH_LINK_DOWN;
+	link.link_status = RTE_ETH_LINK_DOWN;
 	link.link_speed = 0;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 	memset(&old, 0, sizeof(old));
 
 	/* load old link status */
@@ -1198,8 +1198,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 		return 0;
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = hw->aq_link_status.mbps;
 
 	rte_eth_linkstatus_set(dev, &link);
@@ -1333,7 +1333,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1532,13 +1532,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	hw->aq_fw_ops->get_flow_control(hw, &fc);
 
 	if (fc == AQ_NIC_FC_OFF)
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (fc & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (fc & AQ_NIC_FC_TX)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 
 	return 0;
 }
@@ -1553,13 +1553,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (hw->aq_fw_ops->set_flow_control == NULL)
 		return -ENOTSUP;
 
-	if (fc_conf->mode == RTE_FC_NONE)
+	if (fc_conf->mode == RTE_ETH_FC_NONE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
-	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
-	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
-	else if (fc_conf->mode == RTE_FC_FULL)
+	else if (fc_conf->mode == RTE_ETH_FC_FULL)
 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
 
 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1727,14 +1727,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
 
-	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
 
-	if (mask & ETH_VLAN_EXTEND_MASK)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
 		ret = -ENOTSUP;
 
 	return ret;
@@ -1750,10 +1750,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 	PMD_INIT_FUNC_TRACE();
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
 		break;
 	default:
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
index fbc9917ed30d..ed9ef9f0cc52 100644
--- a/drivers/net/atlantic/atl_ethdev.h
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -11,15 +11,15 @@
 #include "hw_atl/hw_atl_utils.h"
 
 #define ATL_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define ATL_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct atl_adapter *)adapter)->hw)
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 0d3460383a50..2ff426892df2 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
 	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_IPV4_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
 	/* allocate memory for the software ring */
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 932ec90265cf..5d94db02c506 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1998,9 +1998,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	/* Setup required number of queues */
 	_avp_set_queue_counts(eth_dev);
 
-	mask = (ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+	mask = (RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 	ret = avp_vlan_offload_set(eth_dev, mask);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2140,8 +2140,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
-	link->link_speed = ETH_SPEED_NUM_10G;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_speed = RTE_ETH_SPEED_NUM_10G;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_status = !!(avp->flags & AVP_F_LINKUP);
 
 	return -1;
@@ -2191,8 +2191,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
 	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
 	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	}
 
 	return 0;
@@ -2205,9 +2205,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	uint64_t offloads = dev_conf->rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2216,13 +2216,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
 
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index ca32ad641873..3aaa2193272f 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
 	pdata->rss_hf = rss_conf->rss_hf;
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 }
 
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 0250256830ac..dab0c6775d1d 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
 	struct axgbe_port *pdata =  dev->data->dev_private;
 	/* Checksum offload to hardware */
 	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_CHECKSUM;
+				RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return 0;
 }
 
@@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		pdata->rss_enable = 1;
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		pdata->rss_enable = 0;
 	else
 		return  -1;
@@ -385,7 +385,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
 
 	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 				max_pkt_len > pdata->rx_buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -521,8 +521,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		pdata->rss_table[i] = reta_conf[idx].reta[shift];
@@ -552,8 +552,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		reta_conf[idx].reta[shift] = pdata->rss_table[i];
@@ -590,13 +590,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
 
-	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Set the RSS options */
@@ -765,7 +765,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
 	link.link_status = pdata->phy_link;
 	link.link_speed = pdata->phy_speed;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
 		PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1208,24 +1208,24 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
 	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
 	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
-	dev_info->speed_capa =  ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_SCATTER	  |
-		DEV_RX_OFFLOAD_KEEP_CRC;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_SCATTER	  |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
 		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1262,13 +1262,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	fc.autoneg = pdata->pause_autoneg;
 
 	if (pdata->rx_pause && pdata->tx_pause)
-		fc.mode = RTE_FC_FULL;
+		fc.mode = RTE_ETH_FC_FULL;
 	else if (pdata->rx_pause)
-		fc.mode = RTE_FC_RX_PAUSE;
+		fc.mode = RTE_ETH_FC_RX_PAUSE;
 	else if (pdata->tx_pause)
-		fc.mode = RTE_FC_TX_PAUSE;
+		fc.mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc.mode = RTE_FC_NONE;
+		fc.mode = RTE_ETH_FC_NONE;
 
 	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
 	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1298,13 +1298,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	AXGMAC_IOWRITE(pdata, reg, reg_val);
 	fc.mode = fc_conf->mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 	} else {
@@ -1386,15 +1386,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	fc.mode = pfc_conf->fc.mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1830,8 +1830,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+	case RTE_ETH_VLAN_TYPE_INNER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
 		if (qinq) {
 			if (tpid != 0x8100 && tpid != 0x88a8)
 				PMD_DRV_LOG(ERR,
@@ -1848,8 +1848,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    "Inner type not supported in single tag\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
 		if (qinq) {
 			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
 			/*Enable outer VLAN tag*/
@@ -1866,11 +1866,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 					    "tag supported 0x8100/0x88A8\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_MAX:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+	case RTE_ETH_VLAN_TYPE_MAX:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
 		break;
-	case ETH_VLAN_TYPE_UNKNOWN:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+	case RTE_ETH_VLAN_TYPE_UNKNOWN:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
 		break;
 	}
 	return 0;
@@ -1904,8 +1904,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_stripping(pdata);
@@ -1915,8 +1915,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_stripping(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_filtering(pdata);
@@ -1926,14 +1926,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_filtering(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
 			axgbe_vlan_extend_enable(pdata);
 			/* Set global registers with default ethertype*/
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					    RTE_ETHER_TYPE_VLAN);
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					    RTE_ETHER_TYPE_VLAN);
 		} else {
 			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index a6226729fe4d..0a3e1c59df1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -97,12 +97,12 @@
 
 /* Receive Side Scaling */
 #define AXGBE_RSS_OFFLOAD  ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define AXGBE_RSS_HASH_KEY_SIZE		40
 #define AXGBE_RSS_MAX_TABLE_SIZE	256
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
index 4f98e695ae74..59fa9175aded 100644
--- a/drivers/net/axgbe/axgbe_mdio.c
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
 		pdata->an_int = 0;
 		axgbe_an73_clear_interrupts(pdata);
 		pdata->eth_dev->data->dev_link.link_status =
-			ETH_LINK_DOWN;
+			RTE_ETH_LINK_DOWN;
 	} else if (pdata->an_state == AXGBE_AN_ERROR) {
 		PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
 			    cur_state);
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index c8618d2d6daa..aa2c27ebaa49 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		(DMA_CH_INC * rxq->queue_id));
 	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
 						  DMA_CH_RDTR_LO);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 567ea2382864..78fc717ec44a 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
 	link.link_speed = sc->link_vars.line_speed;
 	switch (sc->link_vars.duplex) {
 		case DUPLEX_FULL:
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			break;
 		case DUPLEX_HALF:
-			link.link_duplex = ETH_LINK_HALF_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 			break;
 	}
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	link.link_status = sc->link_vars.link_up;
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -408,7 +408,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
 		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
 				"VF device is no longer operational");
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	}
 
 	return ret;
@@ -534,7 +534,7 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
 
 	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
 	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -669,7 +669,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 	bnx2x_load_firmware(sc);
 	assert(sc->firmware);
 
-	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		sc->udp_rss = 1;
 
 	sc->rx_budget = BNX2X_RX_BUDGET;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 6743cf92b0e6..39bd739c7bc9 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -569,37 +569,37 @@ struct bnxt_rep_info {
 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
 
 #define BNXT_ETH_RSS_SUPPORT (	\
-	ETH_RSS_IPV4 |		\
-	ETH_RSS_NONFRAG_IPV4_TCP |	\
-	ETH_RSS_NONFRAG_IPV4_UDP |	\
-	ETH_RSS_IPV6 |		\
-	ETH_RSS_NONFRAG_IPV6_TCP |	\
-	ETH_RSS_NONFRAG_IPV6_UDP |	\
-	ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_CKSUM | \
-				     DEV_TX_OFFLOAD_UDP_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_TSO | \
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_QINQ_INSERT | \
-				     DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
-				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_TCP_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_KEEP_CRC | \
-				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-				     DEV_RX_OFFLOAD_TCP_LRO | \
-				     DEV_RX_OFFLOAD_SCATTER | \
-				     DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RSS_IPV4 |		\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
+	RTE_ETH_RSS_IPV6 |		\
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
+	RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+				     RTE_ETH_RX_OFFLOAD_SCATTER | \
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index f385723a9f65..2791a5c62db1 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 		goto err_out;
 
 	/* Alloc RSS context only if RSS mode is enabled */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
 
 		/* RSS table size in Thor is 512.
@@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	 * setting is not available at this time, it will not be
 	 * configured correctly in the CFA.
 	 */
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 
 	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
-				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
 				    true : false);
 	if (rc)
 		goto err_out;
@@ -923,35 +923,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
 		link_speed = bp->link_info->support_pam4_speeds;
 
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
-		speed_capa |= ETH_LINK_SPEED_2_5G;
+		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
-		speed_capa |= ETH_LINK_SPEED_20G;
+		speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	if (bp->link_info->auto_mode ==
 	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -995,14 +995,14 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
 				    dev_info->tx_queue_offload_capa;
 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
@@ -1049,8 +1049,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	 */
 
 	/* VMDq resources */
-	vpool = 64; /* ETH_64_POOLS */
-	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	vpool = 64; /* RTE_ETH_64_POOLS */
+	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
 	for (i = 0; i < 4; vpool >>= 1, i++) {
 		if (max_vnics > vpool) {
 			for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1145,15 +1145,15 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
 		goto resource_error;
 
-	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
 		goto resource_error;
 
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
 	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
@@ -1182,7 +1182,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
 			eth_dev->data->port_id,
 			(uint32_t)link->link_speed,
-			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			("full-duplex") : ("half-duplex\n"));
 	else
 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1199,10 +1199,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return 1;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		return 1;
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1247,15 +1247,15 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 	 * a limited subset have been enabled.
 	 */
 	if (eth_dev->data->dev_conf.rxmode.offloads &
-		~(DEV_RX_OFFLOAD_VLAN_STRIP |
-		  DEV_RX_OFFLOAD_KEEP_CRC |
-		  DEV_RX_OFFLOAD_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_TCP_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_RSS_HASH |
-		  DEV_RX_OFFLOAD_VLAN_FILTER))
+		~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		  RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
 		goto use_scalar_rx;
 
 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1307,7 +1307,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 	 * or tx offloads.
 	 */
 	if (eth_dev->data->scattered_rx ||
-	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+	    (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
 	    BNXT_TRUFLOW_EN(bp))
 		goto use_scalar_tx;
 
@@ -1608,10 +1608,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
 	bnxt_link_update_op(eth_dev, 1);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		vlan_mask |= ETH_VLAN_FILTER_MASK;
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		vlan_mask |= ETH_VLAN_STRIP_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
 	if (rc)
 		goto error;
@@ -1833,8 +1833,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		/* Retrieve link info from hardware */
 		rc = bnxt_get_hwrm_link_config(bp, &new);
 		if (rc) {
-			new.link_speed = ETH_LINK_SPEED_100M;
-			new.link_duplex = ETH_LINK_FULL_DUPLEX;
+			new.link_speed = RTE_ETH_LINK_SPEED_100M;
+			new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR,
 				"Failed to retrieve link rc = 0x%x!\n", rc);
 			goto out;
@@ -2028,7 +2028,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	if (!vnic->rss_table)
 		return -EINVAL;
 
-	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return -EINVAL;
 
 	if (reta_size != tbl_size) {
@@ -2041,8 +2041,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	for (i = 0; i < reta_size; i++) {
 		struct bnxt_rx_queue *rxq;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (!(reta_conf[idx].mask & (1ULL << sft)))
 			continue;
@@ -2095,8 +2095,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 	}
 
 	for (idx = 0, i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
@@ -2134,7 +2134,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	 * If RSS enablement were different than dev_configure,
 	 * then return -EINVAL
 	 */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
@@ -2152,7 +2152,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
 	vnic->hash_mode =
 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
-					    ETH_RSS_LEVEL(rss_conf->rss_hf));
+					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
 
 	/*
 	 * If hashkey is not specified, use the previously configured
@@ -2197,30 +2197,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 		hash_types = vnic->hash_type;
 		rss_conf->rss_hf = 0;
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_IPV4;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_IPV6;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 		}
@@ -2260,17 +2260,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 		fc_conf->autoneg = 1;
 	switch (bp->link_info->pause) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	}
 	return 0;
@@ -2293,11 +2293,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		bp->link_info->auto_pause = 0;
 		bp->link_info->force_pause = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@@ -2308,7 +2308,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
 		}
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@@ -2319,7 +2319,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
 		}
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@@ -2350,7 +2350,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2364,7 +2364,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		tunnel_type =
 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2413,7 +2413,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (!bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2430,7 +2430,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
 		port = bp->vxlan_fw_dst_port_id;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (!bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2608,7 +2608,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 	int rc;
 
 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
-	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		/* Remove any VLAN filters programmed */
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
@@ -2628,7 +2628,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 		bnxt_add_vlan_filter(bp, 0);
 	}
 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
 
 	return 0;
 }
@@ -2641,7 +2641,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 
 	/* Destroy vnic filters and vnic */
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
 	}
@@ -2680,7 +2680,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		rc = bnxt_add_vlan_filter(bp, 0);
 		if (rc)
 			return rc;
@@ -2698,7 +2698,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
 
 	return rc;
 }
@@ -2718,22 +2718,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 	if (!dev->data->dev_started)
 		return 0;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering */
 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
 		else
 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@@ -2748,10 +2748,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 {
 	struct bnxt *bp = dev->data->dev_private;
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
-	if (vlan_type != ETH_VLAN_TYPE_INNER &&
-	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	    vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -2763,7 +2763,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		return -EINVAL;
 	}
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		switch (tpid) {
 		case RTE_ETHER_TYPE_QINQ:
 			bp->outer_tpid_bd =
@@ -2791,7 +2791,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		}
 		bp->outer_tpid_bd |= tpid;
 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer vlan in QinQ\n");
 		return -EINVAL;
@@ -2831,7 +2831,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
 	bnxt_del_dflt_mac_filter(bp, vnic);
 
 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		/* This filter will allow only untagged packets */
 		rc = bnxt_add_vlan_filter(bp, 0);
 	} else {
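
For quick reference alongside the VLAN hunks above: the application-side
path that exercises them is the ethdev VLAN offload API. A minimal sketch
with the renamed constants (illustrative only, not part of this patch;
port_id is a placeholder):

    #include <rte_ethdev.h>

    /* Enable VLAN strip and filter on a port with the RTE_ETH_* names. */
    static int
    enable_vlan_offloads(uint16_t port_id)
    {
        int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

        if (vlan_offload < 0)
            return vlan_offload;
        vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD |
                        RTE_ETH_VLAN_FILTER_OFFLOAD;
        return rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
    }
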
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index b2ebb5634e3a..ced697a73980 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -978,7 +978,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -1177,7 +1177,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
 	}
 
 	/* If RSS types is 0, use a best effort configuration */
-	types = rss->types ? rss->types : ETH_RSS_IPV4;
+	types = rss->types ? rss->types : RTE_ETH_RSS_IPV4;
 
 	hash_type = bnxt_rte_to_hwrm_hash_types(types);
 
@@ -1322,7 +1322,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 
 		rxq = bp->rx_queues[act_q->index];
 
-		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+		if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
 			goto use_vnic;
 
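
The best-effort RSS fallback above (types defaulting to RTE_ETH_RSS_IPV4)
is fed by rte_flow's RSS action. A hedged sketch of such an action with
the new names (illustrative only; the queue list is hypothetical):

    #include <rte_flow.h>

    static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
    static const struct rte_flow_action_rss rss_action = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0, /* outermost encapsulation */
        .types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        .queue_num = RTE_DIM(rss_queues),
        .queue = rss_queues,
    };
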
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 181e607d7bf8..82e89b7c8af7 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	uint16_t j = dst_id - 1;
 
 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
-	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+	if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
 	    conf->pool_map[j].pools & (1UL << j)) {
 		PMD_DRV_LOG(DEBUG,
 			"Add vlan %u to vmdq pool %u\n",
@@ -2979,12 +2979,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 {
 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
-	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+	if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
 	switch (conf_link_speed) {
-	case ETH_LINK_SPEED_10M_HD:
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
 	}
@@ -3001,51 +3001,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 {
 	uint16_t eth_link_speed = 0;
 
-	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
-		return ETH_LINK_SPEED_AUTONEG;
+	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+		return RTE_ETH_LINK_SPEED_AUTONEG;
 
-	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_100M:
-	case ETH_LINK_SPEED_100M_HD:
+	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
 		break;
-	case ETH_LINK_SPEED_2_5G:
+	case RTE_ETH_LINK_SPEED_2_5G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
 		break;
-	case ETH_LINK_SPEED_20G:
+	case RTE_ETH_LINK_SPEED_20G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 		break;
@@ -3058,11 +3058,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 	return eth_link_speed;
 }
 
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
-		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
-		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
-		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+		RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+		RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+		RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+		RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
 
 static int bnxt_validate_link_speed(struct bnxt *bp)
 {
@@ -3071,13 +3071,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
 	uint32_t link_speed_capa;
 	uint32_t one_speed;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG)
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
 		return 0;
 
 	link_speed_capa = bnxt_get_speed_capabilities(bp);
 
-	if (link_speed & ETH_LINK_SPEED_FIXED) {
-		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+	if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+		one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
 
 		if (one_speed & (one_speed - 1)) {
 			PMD_DRV_LOG(ERR,
@@ -3107,71 +3107,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 {
 	uint16_t ret = 0;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
 		if (bp->link_info->support_speeds)
 			return bp->link_info->support_speeds;
 		link_speed = BNXT_SUPPORTED_SPEEDS;
 	}
 
-	if (link_speed & ETH_LINK_SPEED_100M)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_100M_HD)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_1G)
+	if (link_speed & RTE_ETH_LINK_SPEED_1G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
-	if (link_speed & ETH_LINK_SPEED_2_5G)
+	if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
-	if (link_speed & ETH_LINK_SPEED_10G)
+	if (link_speed & RTE_ETH_LINK_SPEED_10G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
-	if (link_speed & ETH_LINK_SPEED_20G)
+	if (link_speed & RTE_ETH_LINK_SPEED_20G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
-	if (link_speed & ETH_LINK_SPEED_25G)
+	if (link_speed & RTE_ETH_LINK_SPEED_25G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
-	if (link_speed & ETH_LINK_SPEED_40G)
+	if (link_speed & RTE_ETH_LINK_SPEED_40G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
-	if (link_speed & ETH_LINK_SPEED_50G)
+	if (link_speed & RTE_ETH_LINK_SPEED_50G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
-	if (link_speed & ETH_LINK_SPEED_100G)
+	if (link_speed & RTE_ETH_LINK_SPEED_100G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
-	if (link_speed & ETH_LINK_SPEED_200G)
+	if (link_speed & RTE_ETH_LINK_SPEED_200G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 	return ret;
 }
 
 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 {
-	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+	uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	switch (hw_link_speed) {
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
-		eth_link_speed = ETH_SPEED_NUM_100M;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
-		eth_link_speed = ETH_SPEED_NUM_1G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
-		eth_link_speed = ETH_SPEED_NUM_2_5G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
-		eth_link_speed = ETH_SPEED_NUM_10G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
-		eth_link_speed = ETH_SPEED_NUM_20G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
-		eth_link_speed = ETH_SPEED_NUM_25G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
-		eth_link_speed = ETH_SPEED_NUM_40G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
-		eth_link_speed = ETH_SPEED_NUM_50G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
-		eth_link_speed = ETH_SPEED_NUM_100G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
-		eth_link_speed = ETH_SPEED_NUM_200G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
 	default:
@@ -3184,16 +3184,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 
 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 {
-	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+	uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (hw_link_duplex) {
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
 		/* FALLTHROUGH */
-		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
-		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@@ -3222,12 +3222,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 		link->link_speed =
 			bnxt_parse_hw_link_speed(link_info->link_speed);
 	else
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
 	link->link_status = link_info->link_up;
 	link->link_autoneg = link_info->auto_mode ==
 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
-		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+		RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
 exit:
 	return rc;
 }
@@ -3253,7 +3253,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	if (BNXT_CHIP_P5(bp) &&
-	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+	    dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
 		/* 40G is not supported as part of media auto detect.
 		 * The speed should be forced and autoneg disabled
 		 * to configure 40G speed.
@@ -3344,7 +3344,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
 	HWRM_CHECK_RESULT();
 
-	bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+	bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
 
 	svif_info = rte_le_to_cpu_16(resp->svif_info);
 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
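
On the FIXED/AUTONEG semantics validated above: RTE_ETH_LINK_SPEED_AUTONEG
is 0, and when RTE_ETH_LINK_SPEED_FIXED is set exactly one speed bit may be
set, which is what bnxt_validate_link_speed() enforces. A sketch of the
corresponding application-side request (illustrative, not part of this
patch):

    #include <rte_ethdev.h>

    /* Force a fixed 40G link in the port configuration. */
    static void
    request_fixed_40g(struct rte_eth_conf *conf)
    {
        conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
                            RTE_ETH_LINK_SPEED_40G;
        /* RTE_ETH_LINK_SPEED_AUTONEG (0) would request autonegotiation. */
    }
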
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index b7e88e013a84..1c07db3ca9c5 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -537,7 +537,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 08cefa1baaef..7940d489a102 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -187,7 +187,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 			rx_ring_info->rx_ring_struct->ring_size *
 			AGG_RING_SIZE_FACTOR)) : 0;
 
-		if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+		if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 			int tpa_max = BNXT_TPA_MAX_AGGS(bp);
 
 			tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@@ -283,7 +283,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 					    ag_bitmap_start, ag_bitmap_len);
 
 			/* TPA info */
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 				rx_ring_info->tpa_info =
 					((struct bnxt_tpa_info *)
 					 ((char *)mz->addr + tpa_info_start));
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 38ec4aa14b77..1456f8b54ffa 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -52,13 +52,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 	bp->nr_vnics = 0;
 
 	/* Multi-queue mode */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
 
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* FALLTHROUGH */
 			/* ETH_8/64_POOLs */
 			pools = conf->nb_queue_pools;
@@ -66,14 +66,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 			max_pools = RTE_MIN(bp->max_vnics,
 					    RTE_MIN(bp->max_l2_ctx,
 					    RTE_MIN(bp->max_rsscos_ctx,
-						    ETH_64_POOLS)));
+						    RTE_ETH_64_POOLS)));
 			PMD_DRV_LOG(DEBUG,
 				    "pools = %u max_pools = %u\n",
 				    pools, max_pools);
 			if (pools > max_pools)
 				pools = max_pools;
 			break;
-		case ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_RSS:
 			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
 			break;
 		default:
@@ -111,7 +111,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 				    ring_idx, rxq, i, vnic);
 		}
 		if (i == 0) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
 				bp->eth_dev->data->promiscuous = 1;
 				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 			}
@@ -121,8 +121,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 		vnic->end_grp_id = end_grp_id;
 
 		if (i) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
-			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
 				vnic->rss_dflt_cr = true;
 			goto skip_filter_allocation;
 		}
@@ -147,14 +147,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
 	bp->rx_num_qs_per_vnic = nb_q_per_grp;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
 
 		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
 			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
 
 		for (i = 0; i < bp->nr_vnics; i++) {
-			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
 
 			vnic = &bp->vnic_info[i];
 			vnic->hash_type =
@@ -363,7 +363,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
 	rxq->port_id = eth_dev->data->port_id;
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -478,7 +478,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
 
 		if (BNXT_HAS_RING_GRPS(bp)) {
@@ -549,7 +549,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq->rx_started = false;
 	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (BNXT_HAS_RING_GRPS(bp))
 			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index aeacc60a0127..eb555c4545e6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
 	offloads = dev_conf->rxmode.offloads;
 
-	outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
 
 	/* Initialize ol_flags table. */
 	pt = rxr->ol_flags_table;
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
index d08854ff61e2..e4905b4fd169 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
@@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 9b9489a695a2..0627fd212d0a 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static inline void
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 13211060cf0e..f15e2d3b4ed4 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index 6e563053260a..ffd560166cac 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 9e45ddd7a82e..f2fcaf53021c 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -353,7 +353,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@@ -479,7 +479,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp(txq, nb_tx_pkts);
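
The fast-free completion path selected above is opted into by the
application at configure time. A minimal sketch (illustrative only, not
part of this patch):

    #include <rte_ethdev.h>

    /* Opt in to the fast-free Tx completion path. The application must
     * guarantee that, per Tx queue, all mbufs come from one mempool and
     * carry a reference count of 1. */
    static void
    request_fast_free(struct rte_eth_conf *conf)
    {
        conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
    }
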
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 26253a7e17f2..c63cf4b943fa 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 {
 	uint16_t hwrm_type = 0;
 
-	if (rte_type & ETH_RSS_IPV4)
+	if (rte_type & RTE_ETH_RSS_IPV4)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-	if (rte_type & ETH_RSS_IPV6)
+	if (rte_type & RTE_ETH_RSS_IPV6)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 
 	return hwrm_type;
@@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
 {
 	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
-	bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
-	bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP));
+	bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+	bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP));
 	bool l3_only = l3 && !l4;
 	bool l3_and_l4 = l3 && l4;
 
@@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
 	 * return default hash mode.
 	 */
 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
-		return ETH_RSS_LEVEL_PMD_DEFAULT;
+		return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
 	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
 		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_INNERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
 	else
-		rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+		rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	return rss_level;
 }
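
The hash-level translation above consumes the RTE_ETH_RSS_LEVEL_* selector
carried in the upper bits of rss_hf; RTE_ETH_RSS_LEVEL() extracts it. A
sketch of how an application encodes it (illustrative, not part of this
patch):

    #include <rte_ethdev.h>

    /* Hash on the inner headers of tunnelled traffic by encoding a level
     * selector into rss_hf; bnxt_rte_to_hwrm_hash_level() above receives
     * it via RTE_ETH_RSS_LEVEL(). */
    static uint64_t
    inner_ipv4_rss_hf(void)
    {
        return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP |
               RTE_ETH_RSS_LEVEL_INNERMOST;
    }
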
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index f71543810970..77ecbef04c3d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 	if (vf >= bp->pdev->max_vfs)
 		return -EINVAL;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
 		PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
 		return -ENOTSUP;
 	}
 
 	/* Is this really the correct mapping?  VFd seems to think it is. */
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		flag |= BNXT_VNIC_INFO_PROMISC;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		flag |= BNXT_VNIC_INFO_BCAST;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
 
 	if (on)
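
A hedged usage sketch for the renamed RTE_ETH_VMDQ_ACCEPT_* bits, assuming
the rte_pmd_bnxt_set_vf_rxmode() prototype from rte_pmd_bnxt.h
(illustrative only; port and VF ids are placeholders):

    #include <rte_pmd_bnxt.h>

    /* Accept broadcast and multicast traffic on a given VF. */
    static int
    vf_accept_bcast_mcast(uint16_t port, uint16_t vf)
    {
        uint16_t rx_mask = RTE_ETH_VMDQ_ACCEPT_BROADCAST |
                           RTE_ETH_VMDQ_ACCEPT_MULTICAST;

        return rte_pmd_bnxt_set_vf_rxmode(port, vf, rx_mask, 1);
    }
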
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index fc179a2732ac..8b104b639184 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -167,8 +167,8 @@ struct bond_dev_private {
 	struct rte_eth_desc_lim tx_desc_lim;	/**< Tx descriptor limits */
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[52];				/**< 52-byte hash key buffer. */
 	uint8_t rss_key_len;				/**< hash key length in bytes. */
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2029955c1092..ca50583d62d8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
 	uint16_t key_speed;
 
 	switch (speed) {
-	case ETH_SPEED_NUM_NONE:
+	case RTE_ETH_SPEED_NUM_NONE:
 		key_speed = 0x00;
 		break;
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		key_speed = BOND_LINK_SPEED_KEY_10M;
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		key_speed = BOND_LINK_SPEED_KEY_100M;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		key_speed = BOND_LINK_SPEED_KEY_1000M;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		key_speed = BOND_LINK_SPEED_KEY_10G;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		key_speed = BOND_LINK_SPEED_KEY_20G;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		key_speed = BOND_LINK_SPEED_KEY_40G;
 		break;
 	default:
@@ -887,7 +887,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
 		if (ret >= 0 && link_info.link_status != 0) {
 			key = link_speed_key(link_info.link_speed) << 1;
-			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+			if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
 				key |= BOND_LINK_FULL_DUPLEX_KEY;
 		} else {
 			key = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 5140ef14c2ee..84943cffe2bb 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
 
 	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
 	if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
 		return 0;
 
 	internals = bonded_eth_dev->data->dev_private;
@@ -592,7 +592,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 			return -1;
 		}
 
-		 if (link_props.link_status == ETH_LINK_UP) {
+		if (link_props.link_status == RTE_ETH_LINK_UP) {
 			if (internals->active_slave_count == 0 &&
 			    !internals->user_defined_primary_port)
 				bond_ethdev_primary_set(internals,
@@ -727,7 +727,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
 		internals->tx_offload_capa = 0;
 		internals->rx_queue_offload_capa = 0;
 		internals->tx_queue_offload_capa = 0;
-		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 		internals->reta_size = 0;
 		internals->candidate_max_rx_pktlen = 0;
 		internals->max_rx_pktlen = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 8d038ba6b6c4..834a5937b3aa 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1369,8 +1369,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
 		 * In any other mode the link properties are set to default
 		 * values of AUTONEG/DUPLEX
 		 */
-		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
-		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	}
 }
 
@@ -1700,7 +1700,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	/* If RSS is enabled for bonding, try to enable it for slaves  */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		/* rss_key won't be empty if RSS is configured in bonded dev */
 		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
 					internals->rss_key_len;
@@ -1714,12 +1714,12 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	slave_eth_dev->data->dev_conf.rxmode.mtu =
 			bonded_eth_dev->data->dev_conf.rxmode.mtu;
@@ -1823,7 +1823,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* If RSS is enabled for bonding, synchronize RETA */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int i;
 		struct bond_dev_private *internals;
 
@@ -1946,7 +1946,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 1;
 
 	internals = eth_dev->data->dev_private;
@@ -2086,7 +2086,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 
 	internals->link_status_polling_enabled = 0;
@@ -2416,15 +2416,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 
 	bond_ctx = ethdev->data->dev_private;
 
-	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	if (ethdev->data->dev_started == 0 ||
 			bond_ctx->active_slave_count == 0) {
-		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 
-	ethdev->data->dev_link.link_status = ETH_LINK_UP;
+	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	if (wait_to_complete)
 		link_update = rte_eth_link_get;
@@ -2449,7 +2449,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 					  &slave_link);
 			if (ret < 0) {
 				ethdev->data->dev_link.link_speed =
-					ETH_SPEED_NUM_NONE;
+					RTE_ETH_SPEED_NUM_NONE;
 				RTE_BOND_LOG(ERR,
 					"Slave (port %u) link get failed: %s",
 					bond_ctx->active_slaves[idx],
@@ -2491,7 +2491,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 		 * In theses mode the maximum theoretical link speed is the sum
 		 * of all the slaves
 		 */
-		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		one_link_update_succeeded = false;
 
 		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2865,7 +2865,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 			goto link_update;
 
 		/* check link state properties if bonded link is up*/
-		if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+		if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 			if (link_properties_valid(bonded_eth_dev, &link) != 0)
 				RTE_BOND_LOG(ERR, "Invalid link properties "
 					     "for slave %d in bonding mode %d",
@@ -2881,7 +2881,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
 			bonded_eth_dev->data->dev_link.link_status =
-								ETH_LINK_UP;
+								RTE_ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 			lsc_flag = 1;
 
@@ -2973,12 +2973,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < reta_count; i++) {
 		internals->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -3011,8 +3011,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
 
@@ -3274,7 +3274,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	internals->max_rx_pktlen = 0;
 
 	/* Initially allow to choose any offload type */
-	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 
 	memset(&internals->default_rxconf, 0,
 	       sizeof(internals->default_rxconf));
@@ -3501,7 +3501,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	 * set key to the the value specified in port RSS configuration.
 	 * Fall back to default RSS key if the key is not specified
 	 */
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		struct rte_eth_rss_conf *rss_conf =
 			&dev->data->dev_conf.rx_adv_conf.rss_conf;
 		if (rss_conf->rss_key != NULL) {
@@ -3526,9 +3526,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
-			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 				internals->reta_conf[i].reta[j] =
-						(i * RTE_RETA_GROUP_SIZE + j) %
+						(i * RTE_ETH_RETA_GROUP_SIZE + j) %
 						dev->data->nb_rx_queues;
 		}
 	}
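
The RETA sizing and group arithmetic above follows the generic ethdev
pattern. A sketch of an application spreading queues across the table with
the renamed constants (illustrative only; assumes reta_size is a multiple
of RTE_ETH_RETA_GROUP_SIZE):

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_rx_queues)
    {
        struct rte_eth_rss_reta_entry64
            reta[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
        unsigned int i, j;

        memset(reta, 0, sizeof(reta));
        for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
            reta[i].mask = ~0ULL; /* update all 64 entries in the group */
            for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
                reta[i].reta[j] =
                    (i * RTE_ETH_RETA_GROUP_SIZE + j) % nb_rx_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
    }
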
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 9dfea99db9b2..d52f8ffecf23 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
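
nix_rx_offload_flags()/nix_tx_offload_flags() above fold the renamed
RTE_ETH_*_OFFLOAD_* bits into internal NIX flags; on the application side
those bits are requested at configure time. A sketch (illustrative, not
part of this patch):

    #include <rte_ethdev.h>

    /* Request the Rx offloads that map to NIX_RX_OFFLOAD_RSS_F and
     * NIX_RX_OFFLOAD_CHECKSUM_F above, then pass conf to
     * rte_eth_dev_configure(). */
    static void
    request_rx_offloads(struct rte_eth_conf *conf)
    {
        conf->rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
        conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH |
                                 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
    }
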
diff --git a/drivers/net/cnxk/cn10k_rte_flow.c b/drivers/net/cnxk/cn10k_rte_flow.c
index 8c87452934eb..dff4c7746cf5 100644
--- a/drivers/net/cnxk/cn10k_rte_flow.c
+++ b/drivers/net/cnxk/cn10k_rte_flow.c
@@ -98,7 +98,7 @@ cn10k_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index d6af54b56de6..5d603514c045 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -77,12 +77,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
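
The burst-function selection above keys off RTE_ETH_RX_OFFLOAD_SCATTER.
When frames can exceed one mbuf's data room, an application enables
scatter on Rx and multi-segment on Tx to land on the mseg paths; a sketch
(illustrative, not part of this patch):

    #include <rte_ethdev.h>

    /* Allow multi-segment frames so the mseg burst functions are used. */
    static void
    request_multi_seg(struct rte_eth_conf *conf)
    {
        conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
        conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
    }
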
diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
index eb962ef08cab..5e6c5ee11188 100644
--- a/drivers/net/cnxk/cn10k_tx.c
+++ b/drivers/net/cnxk/cn10k_tx.c
@@ -78,11 +78,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 08c86f9e6b7b..17f8f6debbc8 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -298,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
@@ -553,17 +553,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index 5c4387e74e0b..8d504c4a6d92 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -77,12 +77,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index e5691a2a7e16..f3f19fed9780 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -77,11 +77,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2e05d8bf1552..db54468dbca1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 
 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	uint32_t speed_capa;
 
 	/* Auto negotiation disabled */
-	speed_capa = ETH_LINK_SPEED_FIXED;
+	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
-		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return speed_capa;
@@ -65,7 +65,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 	struct roc_nix *nix = &dev->nix;
 	int i, rc = 0;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup Inline Inbound */
 		rc = roc_nix_inl_inb_init(nix);
 		if (rc) {
@@ -80,8 +80,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 		cnxk_nix_inb_mode_set(dev, true);
 	}
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		struct plt_bitmap *bmap;
 		size_t bmap_sz;
 		void *mem;
@@ -100,8 +100,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 
 		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
 
-		/* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
-		if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+		/* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
 			goto done;
 
 		rc = -ENOMEM;
@@ -136,7 +136,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 done:
 	return 0;
 cleanup:
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		rc |= roc_nix_inl_inb_fini(nix);
 	return rc;
 }
@@ -182,7 +182,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	int rc, ret = 0;
 
 	/* Cleanup Inline inbound */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy inbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -199,8 +199,8 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	}
 
 	/* Cleanup Inline outbound */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy outbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
@@ -242,8 +242,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 }
 
@@ -273,7 +273,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct rte_eth_fc_conf fc_conf = {0};
 	int rc;
 
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -281,10 +281,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
@@ -305,11 +305,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (roc_model_is_cn96_ax() &&
 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
-	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_cfg.mode =
-				(fc_cfg.mode == RTE_FC_FULL ||
-				fc_cfg.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_cfg.mode == RTE_ETH_FC_FULL ||
+				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -352,7 +352,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -380,7 +380,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* When Tx Security offload is enabled, increase tx desc count by
 	 * max possible outbound desc count.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		nb_desc += dev->outb.nb_desc;
 
 	/* Setup ROC SQ */
@@ -499,7 +499,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * to avoid meta packet drop as LBK does not currently support
 	 * backpressure.
 	 */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
 		uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
 
 		/* Use current RQ's aura limit if inl rq is not available */
@@ -561,7 +561,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq_sp->qconf.nb_desc = nb_desc;
 	rxq_sp->qconf.mp = mp;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup rq reference for inline dev if present */
 		rc = roc_nix_inl_dev_rq_get(rq);
 		if (rc)
@@ -579,7 +579,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
 		rc = cnxk_nix_tsc_convert(dev);
 		if (rc) {
 			plt_err("Failed to calculate delta and freq mult");
@@ -618,7 +618,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_nix_dbg("Releasing rxq %u", qid);
 
 	/* Release rq reference for inline dev if present */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		roc_nix_inl_dev_rq_put(rq);
 
 	/* Cleanup ROC RQ */
@@ -657,24 +657,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->ethdev_rss_hf = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -683,34 +683,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -746,7 +746,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
 	uint64_t rss_hf;
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 
@@ -958,8 +958,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
 
 	/* Nothing much to do if offload is not enabled */
 	if (!(dev->tx_offloads &
-	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
 		return 0;
 
 	/* Setup LSO formats in AF. Its a no-op if other ethdev has
@@ -1007,13 +1007,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
@@ -1054,7 +1054,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Prepare rx cfg */
 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
 	}
@@ -1062,7 +1062,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
 		/* Disable drop re if rx offload security is enabled and
 		 * platform does not support it.
@@ -1454,12 +1454,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled on PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
 	else
 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		rc = rte_mbuf_dyn_rx_timestamp_register
 			(&dev->tstamp.tstamp_dynfield_offset,
 			 &dev->tstamp.rx_tstamp_dynflag);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 72f80ae948cf..29a3540ed3f8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -58,41 +58,44 @@
 	 CNXK_NIX_TX_NB_SEG_MAX)
 
 #define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
-	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
-	 ETH_RSS_L4_DST_ONLY)
+	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |                   \
+	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 #define CNXK_NIX_RSS_OFFLOAD                                                   \
-	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
-	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
-	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |                 \
+	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL |             \
+	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST |                 \
+	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
 
 #define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
-	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
-	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
-	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
-	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
-	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
-	 DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
+	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |          \
+	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT |             \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |                 \
+	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |                  \
+	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |        \
+	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS |              \
+	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
-	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
-	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
-	 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH |            \
-	 DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP |                \
-	 DEV_RX_OFFLOAD_SECURITY)
+	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |         \
+	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER |    \
+	 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH |    \
+	 RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP |        \
+	 RTE_ETH_RX_OFFLOAD_SECURITY)
 
 #define RSS_IPV4_ENABLE                                                        \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE                                                        \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
-	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE                                                     \
-	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS 3
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index c0b949e21ab0..e068f553495c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -104,11 +104,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
 		val = ROC_NIX_RSS_RETA_SZ_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
 		val = ROC_NIX_RSS_RETA_SZ_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
 		val = ROC_NIX_RSS_RETA_SZ_256;
 	else
 		val = ROC_NIX_RSS_RETA_SZ_64;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index d0924df76152..67464302653d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -81,24 +81,24 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-		{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
-		{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
-		{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
-		{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
-		{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
-		{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
-		{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
-		{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-		{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-		{DEV_RX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
-		{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
-		{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+		{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
 						 "Scalar, Rx Offloads:"
@@ -142,28 +142,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-		{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-		{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
-		{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
-		{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
-		{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
-		{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
-		{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
-		{DEV_TX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
 						 "Scalar, Tx Offloads:"
@@ -203,8 +203,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	enum rte_eth_fc_mode mode_map[] = {
-					   RTE_FC_NONE, RTE_FC_RX_PAUSE,
-					   RTE_FC_TX_PAUSE, RTE_FC_FULL
+					   RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+					   RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
 					  };
 	struct roc_nix *nix = &dev->nix;
 	int mode;
@@ -264,10 +264,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -408,13 +408,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		plt_err("Scatter offload is not enabled for mtu");
 		goto exit;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
 	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
 		plt_err("Greater than maximum supported packet length");
 		goto exit;
@@ -734,8 +734,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -770,8 +770,8 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
 		goto fail;
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = reta[idx];
 			idx++;
@@ -804,7 +804,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 	if (rss_conf->rss_key)
 		roc_nix_rss_key_set(nix, rss_conf->rss_key);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index 6a7080167598..f10a502826c6 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		plt_info("Port %d: Link Up - speed %u Mbps - %s",
 			 (int)(eth_dev->data->port_id),
 			 (uint32_t)link->link_speed,
-			 link->link_duplex == ETH_LINK_FULL_DUPLEX
+			 link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 				 ? "full-duplex"
 				 : "half-duplex");
 	else
@@ -89,7 +89,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
 
 	eth_link.link_status = link->status;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	/* Print link info */
@@ -117,17 +117,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		return 0;
 
 	if (roc_nix_is_lbk(&dev->nix)) {
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_100G;
-		link.link_autoneg = ETH_LINK_FIXED;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		rc = roc_nix_mac_link_info_get(&dev->nix, &info);
 		if (rc)
 			return rc;
 		link.link_status = info.status;
 		link.link_speed = info.speed;
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 		if (info.full_duplex)
 			link.link_duplex = info.full_duplex;
 	}
diff --git a/drivers/net/cnxk/cnxk_ptp.c b/drivers/net/cnxk/cnxk_ptp.c
index 449489f599c4..139fea256ccd 100644
--- a/drivers/net/cnxk/cnxk_ptp.c
+++ b/drivers/net/cnxk/cnxk_ptp.c
@@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 	dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	rc = roc_nix_ptp_rx_ena_dis(nix, true);
 	if (!rc) {
@@ -257,7 +257,7 @@ int
 cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+	uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	struct roc_nix *nix = &dev->nix;
 	int rc = 0;
 
diff --git a/drivers/net/cnxk/cnxk_rte_flow.c b/drivers/net/cnxk/cnxk_rte_flow.c
index ad89a2e105b1..c86c92ce4c2f 100644
--- a/drivers/net/cnxk/cnxk_rte_flow.c
+++ b/drivers/net/cnxk/cnxk_rte_flow.c
@@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 37625c5bfb69..dbcbfaf68a30 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -28,31 +28,31 @@
 #define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
 
 #define CXGBE_DEFAULT_RSS_KEY_LEN     40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-				ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
-				ETH_RSS_NONFRAG_IPV6_OTHER | \
-				ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
-				    ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
-				    ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+				RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+				    RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+				    RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
 
 /* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
-			   DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_TX_OFFLOAD_UDP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_TSO | \
-			   DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			   DEV_RX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_RX_OFFLOAD_UDP_CKSUM | \
-			   DEV_RX_OFFLOAD_TCP_CKSUM | \
-			   DEV_RX_OFFLOAD_SCATTER | \
-			   DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+			   RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+			   RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_SCATTER | \
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 /* Devargs filtermode and filtermask representation */
 enum cxgbe_devargs_filter_mode_flags {
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index f77b2976002c..4758321778d1 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 	}
 
 	new_link.link_status = cxgbe_force_linkup(adapter) ?
-			       ETH_LINK_UP : pi->link_cfg.link_ok;
+			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
 	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
@@ -374,7 +374,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
 			goto out;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 	else
 		eth_dev->data->scattered_rx = 0;
@@ -438,9 +438,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
@@ -1080,13 +1080,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		rx_pause = 1;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1099,12 +1099,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	u8 tx_pause = 0, rx_pause = 0;
 	int ret;
 
-	if (fc_conf->mode == RTE_FC_FULL) {
+	if (fc_conf->mode == RTE_ETH_FC_FULL) {
 		tx_pause = 1;
 		rx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
 		tx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
 		rx_pause = 1;
 	}
 
@@ -1200,9 +1200,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	}
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@@ -1246,8 +1246,8 @@ static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1277,8 +1277,8 @@ static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1479,7 +1479,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_100G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
 		}
@@ -1488,7 +1488,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_50G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
 		}
@@ -1497,7 +1497,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_25G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 91d6bb9bbcb0..f1ac32270961 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1670,7 +1670,7 @@ int cxgbe_link_start(struct port_info *pi)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
-			    !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+			    !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
 			    true);
 	if (ret == 0) {
 		ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@@ -1694,7 +1694,7 @@ int cxgbe_link_start(struct port_info *pi)
 	}
 
 	if (ret == 0 && cxgbe_force_linkup(adapter))
-		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return ret;
 }
 
@@ -1725,10 +1725,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
 	if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
 			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
@@ -1865,7 +1865,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
 {
 #define SET_SPEED(__speed_name) \
 	do { \
-		*speed_caps |= ETH_LINK_ ## __speed_name; \
+		*speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
 	} while (0)
 
 #define FW_CAPS_TO_SPEED(__fw_name) \
@@ -1952,7 +1952,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
 			      speed_caps);
 
 	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
-		*speed_caps |= ETH_LINK_SPEED_FIXED;
+		*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
 }
 
 /**
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index c79cdb8d8ad7..89ea7dd47c0b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -54,29 +54,29 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		DPAA_PMD_DEBUG("enabling scatter mode");
 		fman_if_set_sg(dev->process_private, 1);
 		dev->data->scattered_rx = 1;
@@ -283,43 +283,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	/* Configure link only if link is UP*/
 	if (link->link_status) {
-		if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 			/* Start autoneg only if link is not in autoneg mode */
 			if (!link->link_autoneg)
 				dpaa_restart_link_autoneg(__fif->node_name);
-		} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-			switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
-			case ETH_LINK_SPEED_10M_HD:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+			case RTE_ETH_LINK_SPEED_10M_HD:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10M:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10M:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M_HD:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M_HD:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_1G:
-				speed = ETH_SPEED_NUM_1G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_1G:
+				speed = RTE_ETH_SPEED_NUM_1G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_2_5G:
-				speed = ETH_SPEED_NUM_2_5G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_2_5G:
+				speed = RTE_ETH_SPEED_NUM_2_5G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10G:
-				speed = ETH_SPEED_NUM_10G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10G:
+				speed = RTE_ETH_SPEED_NUM_10G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			default:
-				speed = ETH_SPEED_NUM_NONE;
-				duplex = ETH_LINK_FULL_DUPLEX;
+				speed = RTE_ETH_SPEED_NUM_NONE;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			}
 			/* Set link speed */
@@ -535,30 +535,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G
-					| ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G
+					| RTE_ETH_LINK_SPEED_10G;
 	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, fif->mac_type);
@@ -591,12 +591,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 
 	/* Update Rx offload info */
@@ -623,14 +623,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -664,7 +664,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 			ret = dpaa_get_link_status(__fif->node_name, link);
 			if (ret)
 				return ret;
-			if (link->link_status == ETH_LINK_DOWN &&
+			if (link->link_status == RTE_ETH_LINK_DOWN &&
 			    wait_to_complete)
 				rte_delay_ms(CHECK_INTERVAL);
 			else
@@ -675,15 +675,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	}
 
 	if (ioctl_version < 2) {
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
-		link->link_autoneg = ETH_LINK_AUTONEG;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 		if (fif->mac_type == fman_mac_1g)
-			link->link_speed = ETH_SPEED_NUM_1G;
+			link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		else if (fif->mac_type == fman_mac_2_5g)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else if (fif->mac_type == fman_mac_10g)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
 			DPAA_PMD_ERR("invalid link_speed: %s, %d",
 				     dpaa_intf->name, fif->mac_type);
@@ -962,7 +962,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (max_rx_pktlen <= buffsz) {
 		;
 	} else if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SCATTER) {
+			RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
 			DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
 				"MaxSGlist %d",
@@ -1268,7 +1268,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
 	else
 		return dpaa_eth_dev_stop(dev);
 	return 0;
@@ -1284,7 +1284,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
 	else
 		dpaa_eth_dev_start(dev);
 	return 0;
@@ -1314,10 +1314,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == RTE_FC_NONE) {
+	if (fc_conf->mode == RTE_ETH_FC_NONE) {
 		return 0;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-		 fc_conf->mode == RTE_FC_FULL) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+		 fc_conf->mode == RTE_ETH_FC_FULL) {
 		fman_if_set_fc_threshold(dev->process_private,
 					 fc_conf->high_water,
 					 fc_conf->low_water,
@@ -1361,11 +1361,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 	}
 	ret = fman_if_get_fc_threshold(dev->process_private);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time =
 			fman_if_get_fc_quanta(dev->process_private);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1626,10 +1626,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
 	fc_conf = dpaa_intf->fc_conf;
 	ret = fman_if_get_fc_threshold(fman_intf);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b5728e09c29f..c868e9d5bd9b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -74,11 +74,11 @@
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
 #define DPAA_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP)
 
 #define DPAA_TX_CKSUM_OFFLOAD_MASK (             \
 		PKT_TX_IP_CKSUM |                \
diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
index c5b5ec869519..1ccd03602790 100644
--- a/drivers/net/dpaa/dpaa_flow.c
+++ b/drivers/net/dpaa/dpaa_flow.c
@@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1U << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_ETH;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
 
 				if (ipv4_configured)
 					break;
@@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV4;
 				break;
 
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (ipv6_configured)
 					break;
@@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV6;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
 
 				if (tcp_configured)
 					break;
@@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_TCP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (udp_configured)
 					break;
@@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_UDP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 08f49af7685d..3170694841df 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -220,9 +220,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1ULL << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
-			case ETH_RSS_ETH:
-
+			case RTE_ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_ETH:
 				if (l2_configured)
 					break;
 				l2_configured = 1;
@@ -238,7 +237,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_PPPOE:
+			case RTE_ETH_RSS_PPPOE:
 				if (pppoe_configured)
 					break;
 				kg_cfg->extracts[i].extract.from_hdr.prot =
@@ -252,7 +251,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_ESP:
+			case RTE_ETH_RSS_ESP:
 				if (esp_configured)
 					break;
 				esp_configured = 1;
@@ -268,7 +267,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_AH:
+			case RTE_ETH_RSS_AH:
 				if (ah_configured)
 					break;
 				ah_configured = 1;
@@ -284,8 +283,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_C_VLAN:
-			case ETH_RSS_S_VLAN:
+			case RTE_ETH_RSS_C_VLAN:
+			case RTE_ETH_RSS_S_VLAN:
 				if (vlan_configured)
 					break;
 				vlan_configured = 1;
@@ -301,7 +300,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_MPLS:
+			case RTE_ETH_RSS_MPLS:
 
 				if (mpls_configured)
 					break;
@@ -338,13 +337,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (l3_configured)
 					break;
@@ -382,12 +381,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_TCP_EX:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (l4_configured)
 					break;
@@ -414,8 +413,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a0270e78520e..59e728577f53 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -38,33 +38,33 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_CHECKSUM |
-		DEV_RX_OFFLOAD_SCTP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_TIMESTAMP;
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_RSS_HASH |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -142,7 +142,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN Filter not avaialble */
 		if (!priv->max_vlan_filters) {
 			DPAA2_PMD_INFO("VLAN filter not available");
@@ -150,7 +150,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 
 		if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
 		else
@@ -251,13 +251,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 					dev_rx_offloads_nodis;
 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
 					dev_tx_offloads_nodis;
-	dev_info->speed_capa = ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -270,10 +270,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
 	if (dpaa2_svr_family == SVR_LX2160A) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return 0;
@@ -291,15 +291,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
 	};
 
 	/* Update Rx offload info */
@@ -326,15 +326,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -573,7 +573,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		return -1;
 	}
 
-	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
 			ret = dpaa2_setup_flow_dist(dev,
 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -587,12 +587,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rx_l3_csum_offload = true;
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
 		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -610,7 +610,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
 	{
 		ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -623,12 +623,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		dpaa2_enable_ts[dev->data->port_id] = true;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		tx_l3_csum_offload = true;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
 		tx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -660,8 +660,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
 	dpaa2_tm_init(dev);
 
@@ -1856,7 +1856,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
 			return -1;
 		}
-		if (state.up == ETH_LINK_DOWN &&
+		if (state.up == RTE_ETH_LINK_DOWN &&
 		    wait_to_complete)
 			rte_delay_ms(CHECK_INTERVAL);
 		else
@@ -1868,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 	link.link_speed = state.rate;
 
 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
@@ -2031,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	No TX side flow control (send Pause frame disabled)
 		 */
 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_RX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	} else {
 		/* DPNI_LINK_OPT_PAUSE not set
 		 *  if ASYM_PAUSE set,
@@ -2043,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	Flow control disabled
 		 */
 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return ret;
@@ -2089,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	/* update cfg with fc_conf */
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		/* Full flow control;
 		 * OPT_PAUSE set, ASYM_PAUSE not set
 		 */
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		/* Enable RX flow control
 		 * OPT_PAUSE not set;
 		 * ASYM_PAUSE set;
@@ -2104,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		/* Enable TX Flow control
 		 * OPT_PAUSE set
 		 * ASYM_PAUSE set
@@ -2112,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		/* Disable Flow control
 		 * OPT_PAUSE not set
 		 * ASYM_PAUSE not set
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index fdc62ec30d22..c5e9267bf04d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -65,17 +65,17 @@
 #define DPAA2_TX_CONF_ENABLE	0x08
 
 #define DPAA2_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP | \
-	ETH_RSS_MPLS | \
-	ETH_RSS_C_VLAN | \
-	ETH_RSS_S_VLAN | \
-	ETH_RSS_ESP | \
-	ETH_RSS_AH | \
-	ETH_RSS_PPPOE)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP | \
+	RTE_ETH_RSS_MPLS | \
+	RTE_ETH_RSS_C_VLAN | \
+	RTE_ETH_RSS_S_VLAN | \
+	RTE_ETH_RSS_ESP | \
+	RTE_ETH_RSS_AH | \
+	RTE_ETH_RSS_PPPOE)
 
 /* LX2 FRC Parsed values (Little Endian) */
 #define DPAA2_PKT_TYPE_ETHER		0x0060
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index f40369e2c3f9..7c77243b5d1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -773,7 +773,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP)
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
@@ -987,7 +987,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 							eth_data->port_id);
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP) {
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			rte_vlan_strip(bufs[num_rx]);
 		}
 
@@ -1230,7 +1230,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					if (unlikely(((*bufs)->ol_flags
 						& PKT_TX_VLAN_PKT) ||
 						(eth_data->dev_conf.txmode.offloads
-						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -1273,7 +1273,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
 				(eth_data->dev_conf.txmode.offloads
-				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 				int ret = rte_vlan_insert(bufs);
 				if (ret)
 					goto send_n_return;
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 93bee734ae5d..031c92a66fa0 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -81,15 +81,15 @@
 #define E1000_FTQF_QUEUE_ENABLE          0x00000100
 
 #define IGB_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 /*
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 73152dec6ed1..9da477e59def 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -597,8 +597,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	e1000_clear_hw_cntrs_base_generic(hw);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_em_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -611,39 +611,39 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -1102,9 +1102,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
@@ -1162,17 +1162,17 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -1424,15 +1424,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1601,7 +1601,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
-			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
@@ -1683,13 +1683,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 344149c19147..648b04154c5b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -93,7 +93,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -173,7 +173,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1171,11 +1171,11 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	RTE_SET_USED(dev);
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	return tx_offload_capa;
 }
@@ -1369,13 +1369,13 @@ em_get_rx_port_offloads_capa(void)
 	uint64_t rx_offload_capa;
 
 	rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP  |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM   |
-		DEV_RX_OFFLOAD_KEEP_CRC    |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return rx_offload_capa;
 }
@@ -1469,7 +1469,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1788,7 +1788,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -1831,7 +1831,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1844,7 +1844,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1870,7 +1870,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index dbe811a1ad2f..ae3bc4a9c201 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1073,21 +1073,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
-	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
-	    tx_mq_mode == ETH_MQ_TX_DCB ||
-	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* Check multi-queue mode.
-		 * To no break software we accept ETH_MQ_RX_NONE as this might
+		 * To not break software, we accept RTE_ETH_MQ_RX_NONE as this might
 		 * be used to turn off VLAN filter.
 		 */
 
-		if (rx_mq_mode == ETH_MQ_RX_NONE ||
-		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+		if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+		    rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 		} else {
 			/* Only support one queue on VFs.
@@ -1099,12 +1099,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 		/* TX mode is not used here, so mode might be ignored.*/
-		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(WARNING, "SRIOV is active,"
 					" TX mode %d is not supported. "
 					" Driver will behave as %d mode.",
-					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+					tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
 		}
 
 		/* check valid queue number */
@@ -1117,17 +1117,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 		/* To not break software that sets an invalid mode, only display
 		 * a warning if an invalid mode is used.
 		 */
-		if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
-		    rx_mq_mode != ETH_MQ_RX_RSS) {
+		if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 			/* RSS together with VMDq not supported*/
 			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				     rx_mq_mode);
 			return -EINVAL;
 		}
 
-		if (tx_mq_mode != ETH_MQ_TX_NONE &&
-		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+		    tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
 					" Due to txmode is meaningless in this"
 					" driver, just ignore.",
@@ -1146,8 +1146,8 @@ eth_igb_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = igb_check_mq_mode(dev);
@@ -1287,8 +1287,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/*
 	 * VLAN Offload Settings
 	 */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_igb_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
@@ -1296,7 +1296,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable VLAN filter since VMDq always use VLAN filter */
 		igb_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1310,39 +1310,39 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -2185,21 +2185,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_82576:
 		dev_info->max_rx_queues = 16;
 		dev_info->max_tx_queues = 16;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 16;
 		break;
 
 	case e1000_82580:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
 	case e1000_i350:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
@@ -2225,7 +2225,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		return -EINVAL;
 	}
 	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2251,9 +2251,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2296,12 +2296,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	switch (hw->mac.type) {
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
@@ -2402,17 +2402,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else if (!link_check) {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2588,7 +2588,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
 	/* only outer TPID of double VLAN can be configured*/
-	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg = E1000_READ_REG(hw, E1000_VET);
 		reg = (reg & (~E1000_VET_VET_EXT)) |
 			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
@@ -2703,22 +2703,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -2870,7 +2870,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
 				     " Port %d: Link Up - speed %u Mbps - %s",
 				     dev->data->port_id,
 				     (unsigned)link.link_speed,
-				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				     "full-duplex" : "half-duplex");
 		} else {
 			PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3024,13 +3024,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3099,18 +3099,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 * on configuration
 		 */
 		switch (fc_conf->mode) {
-		case RTE_FC_NONE:
+		case RTE_ETH_FC_NONE:
 			ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_RX_PAUSE:
+		case RTE_ETH_FC_RX_PAUSE:
 			ctrl |= E1000_CTRL_RFCE;
 			ctrl &= ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_TX_PAUSE:
+		case RTE_ETH_FC_TX_PAUSE:
 			ctrl |= E1000_CTRL_TFCE;
 			ctrl &= ~E1000_CTRL_RFCE;
 			break;
-		case RTE_FC_FULL:
+		case RTE_ETH_FC_FULL:
 			ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
 			break;
 		default:
@@ -3258,22 +3258,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -3571,16 +3571,16 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
@@ -3612,16 +3612,16 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 2ce74dd5a9a5..fe355ef6b3b5 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -88,7 +88,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
-	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+	RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a1d5eecc14a1..bcce2fc726d8 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -111,7 +111,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -186,7 +186,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1459,13 +1459,13 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	RTE_SET_USED(dev);
-	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
-			  DEV_TX_OFFLOAD_TCP_TSO     |
-			  DEV_TX_OFFLOAD_MULTI_SEGS;
+	tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return tx_offload_capa;
 }
@@ -1640,19 +1640,19 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
-			  DEV_RX_OFFLOAD_VLAN_FILTER |
-			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_RX_OFFLOAD_UDP_CKSUM   |
-			  DEV_RX_OFFLOAD_TCP_CKSUM   |
-			  DEV_RX_OFFLOAD_KEEP_CRC    |
-			  DEV_RX_OFFLOAD_SCATTER     |
-			  DEV_RX_OFFLOAD_RSS_HASH;
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+			  RTE_ETH_RX_OFFLOAD_SCATTER     |
+			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == e1000_i350 ||
 	    hw->mac.type == e1000_i210 ||
 	    hw->mac.type == e1000_i211)
-		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	return rx_offload_capa;
 }
@@ -1733,7 +1733,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1950,23 +1950,23 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
@@ -2032,23 +2032,23 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -2170,15 +2170,15 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
 			E1000_VMOLR_MPME);
 
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 			vmolr |= E1000_VMOLR_AUPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 			vmolr |= E1000_VMOLR_ROMPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 			vmolr |= E1000_VMOLR_ROPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 			vmolr |= E1000_VMOLR_BAM;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 			vmolr |= E1000_VMOLR_MPME;
 
 		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
@@ -2214,9 +2214,9 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	/* VLVF: set up filters for vlan tags as configured */
 	for (i = 0; i < cfg->nb_pool_maps; i++) {
 		/* set vlan id in VF register and set the valid bit */
-		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
-                        (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
-			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+			(cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
+			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
 			E1000_VLVF_POOLSEL_MASK)));
 	}
 
@@ -2268,7 +2268,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t mrqc;
 
-	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+	if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
 		/*
 		 * SRIOV active scheme
 		 * FIXME if support RSS together with VMDq & SRIOV
@@ -2282,14 +2282,14 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-			case ETH_MQ_RX_RSS:
+			case RTE_ETH_MQ_RX_RSS:
 				igb_rss_configure(dev);
 				break;
-			case ETH_MQ_RX_VMDQ_ONLY:
+			case RTE_ETH_MQ_RX_VMDQ_ONLY:
 				/*Configure general VMDQ only RX parameters*/
 				igb_vmdq_rx_hw_configure(dev);
 				break;
-			case ETH_MQ_RX_NONE:
+			case RTE_ETH_MQ_RX_NONE:
 				/* if mq_mode is none, disable rss mode.*/
 			default:
 				igb_rss_disable(dev);
@@ -2338,7 +2338,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Set maximum packet length by default, and might be updated
 		 * together with enabling/disabling dual VLAN.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			max_len += VLAN_TAG_SIZE;
 
 		E1000_WRITE_REG(hw, E1000_RLPML, max_len);
@@ -2374,7 +2374,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -2444,7 +2444,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2488,16 +2488,16 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
 	if (rxmode->offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		rxcsum |= E1000_RXCSUM_TUOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_CRCOFL;
@@ -2505,7 +2505,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
 		/* clear STRCRC bit in all queues */
@@ -2545,7 +2545,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/* Make sure VLAN Filters are off. */
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
 		rctl &= ~E1000_RCTL_VFE;
 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
@@ -2743,7 +2743,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f3b17d70c9a4..4d2601d15a57 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -117,10 +117,10 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-			DEV_TX_OFFLOAD_UDP_CKSUM |\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
 		       PKT_TX_IP_CKSUM |\
 		       PKT_TX_TCP_SEG)
@@ -332,7 +332,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
 		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
@@ -340,7 +340,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L3 checksum is needed */
 		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -357,12 +357,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L4 checksum is needed */
 		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_UDP_CKSUM) &&
-				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else {
@@ -643,9 +643,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
-	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_NONE;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return 0;
 }
@@ -923,7 +923,7 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
@@ -2004,9 +2004,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Scattered Rx cannot be turned off in the HW, so this capability must
 	 * be forced.
@@ -2067,17 +2067,17 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if (adapter->offloads.rx_offloads &
 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
 		port_offloads |=
-			DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-	port_offloads |= DEV_RX_OFFLOAD_SCATTER;
+	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return port_offloads;
 }
@@ -2087,17 +2087,17 @@ static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-		port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 	if (adapter->offloads.tx_offloads &
 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
 		port_offloads |=
-			DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
-	port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return port_offloads;
 }
@@ -2130,14 +2130,14 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
-			ETH_LINK_SPEED_1G   |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_5G   |
-			ETH_LINK_SPEED_10G  |
-			ETH_LINK_SPEED_25G  |
-			ETH_LINK_SPEED_40G  |
-			ETH_LINK_SPEED_50G  |
-			ETH_LINK_SPEED_100G;
+			RTE_ETH_LINK_SPEED_1G   |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_5G   |
+			RTE_ETH_LINK_SPEED_10G  |
+			RTE_ETH_LINK_SPEED_25G  |
+			RTE_ETH_LINK_SPEED_40G  |
+			RTE_ETH_LINK_SPEED_50G  |
+			RTE_ETH_LINK_SPEED_100G;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
@@ -2303,7 +2303,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
@@ -2416,11 +2416,11 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		/* Check if requested offload is also enabled for the queue */
 		if ((ol_flags & PKT_TX_IP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
 		    (l4_csum_flag == PKT_TX_TCP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
 		    (l4_csum_flag == PKT_TX_UDP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
 			PMD_TX_LOG(DEBUG,
 				"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
 				i, m->nb_segs, tx_ring->id);
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 4f4142ed12d0..865e1241e0ce 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -58,8 +58,8 @@
 
 #define ENA_HASH_KEY_SIZE		40
 
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define ENA_IO_TXQ_IDX(q)		(2 * (q))
 #define ENA_IO_RXQ_IDX(q)		(2 * (q) + 1)
diff --git a/drivers/net/ena/ena_rss.c b/drivers/net/ena/ena_rss.c
index 152098410fa2..be4007e3f3fe 100644
--- a/drivers/net/ena/ena_rss.c
+++ b/drivers/net/ena/ena_rss.c
@@ -76,7 +76,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -93,8 +93,8 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 		/* Each reta_conf is for 64 entries.
 		 * To support 128 we use 2 conf of 64.
 		 */
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
@@ -139,7 +139,7 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -154,8 +154,8 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
-		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		reta_conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
 			reta_conf[reta_conf_idx].reta[reta_idx] =
 				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
@@ -199,34 +199,34 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Convert proto to ETH flag */
 	switch (proto) {
 	case ENA_ADMIN_RSS_TCP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		break;
 	case ENA_ADMIN_RSS_TCP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 		break;
 	case ENA_ADMIN_RSS_IP4:
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 		break;
 	case ENA_ADMIN_RSS_IP6:
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 		break;
 	case ENA_ADMIN_RSS_IP4_FRAG:
-		rss_hf |= ETH_RSS_FRAG_IPV4;
+		rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
 		break;
 	case ENA_ADMIN_RSS_NOT_IP:
-		rss_hf |= ETH_RSS_L2_PAYLOAD;
+		rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
 		break;
 	case ENA_ADMIN_RSS_TCP6_EX:
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 		break;
 	case ENA_ADMIN_RSS_IP6_EX:
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 		break;
 	default:
 		break;
@@ -235,10 +235,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L3. */
 	switch (fields & ENA_HF_RSS_ALL_L3) {
 	case ENA_ADMIN_RSS_L3_SA:
-		rss_hf |= ETH_RSS_L3_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L3_DA:
-		rss_hf |= ETH_RSS_L3_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
 		break;
 	default:
 		break;
@@ -247,10 +247,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L4. */
 	switch (fields & ENA_HF_RSS_ALL_L4) {
 	case ENA_ADMIN_RSS_L4_SP:
-		rss_hf |= ETH_RSS_L4_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L4_DP:
-		rss_hf |= ETH_RSS_L4_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
 		break;
 	default:
 		break;
@@ -268,11 +268,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
 	/* Determine which fields of L3 should be used. */
-	switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
-	case ETH_RSS_L3_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+	case RTE_ETH_RSS_L3_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_DA;
 		break;
-	case ETH_RSS_L3_SRC_ONLY:
+	case RTE_ETH_RSS_L3_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_SA;
 		break;
 	default:
@@ -284,11 +284,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	}
 
 	/* Determine which fields of L4 should be used. */
-	switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
-	case ETH_RSS_L4_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+	case RTE_ETH_RSS_L4_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_DP;
 		break;
-	case ETH_RSS_L4_SRC_ONLY:
+	case RTE_ETH_RSS_L4_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_SP;
 		break;
 	default:
@@ -334,43 +334,43 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
 	int rc, i;
 
 	/* Turn on appropriate fields for each requested packet type */
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
 
-	if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+	if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
 		selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
 
@@ -541,7 +541,7 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint16_t admin_hf;
 	static bool warn_once;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
 		return -ENOTSUP;
 	}
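
For readers following the rename, a minimal sketch (not part of the patch;
the helper and its name are ours) of how an application tests the renamed
RTE_ETH_RSS_* bits that the ena conversion above maps to admin hash protos:

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Illustrative only: dump a few of the hash fields used in the hunks above. */
static void
dump_rss_hf(uint64_t rss_hf)
{
	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
		printf("nonfrag-ipv4-tcp\n");
	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
		printf("nonfrag-ipv6-udp\n");
	if (rss_hf & RTE_ETH_RSS_L3_SRC_ONLY)
		printf("restrict L3 hashing to the source address\n");
	if (rss_hf & RTE_ETH_RSS_L4_DST_ONLY)
		printf("restrict L4 hashing to the destination port\n");
}
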
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 1b567f01eae0..7cdb8ce463ed 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -100,27 +100,27 @@ enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
 
 	if (status & ENETC_LINK_MODE)
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 
 	if (status & ENETC_LINK_STATUS)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	switch (status & ENETC_LINK_SPEED_MASK) {
 	case ENETC_LINK_SPEED_1G:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case ENETC_LINK_SPEED_100M:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	default:
 	case ENETC_LINK_SPEED_10M:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -207,10 +207,10 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	dev_info->max_tx_queues = MAX_TX_RINGS;
 	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
 	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		 DEV_RX_OFFLOAD_UDP_CKSUM |
-		 DEV_RX_OFFLOAD_TCP_CKSUM |
-		 DEV_RX_OFFLOAD_KEEP_CRC);
+		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_KEEP_CRC);
 
 	return 0;
 }
@@ -463,7 +463,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
 			       RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
-	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				     RTE_ETHER_CRC_LEN : 0);
 
 	return 0;
@@ -705,7 +705,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
 	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		int config;
 
 		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
@@ -713,10 +713,10 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		checksum &= ~L3_CKSUM;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		checksum &= ~L4_CKSUM;
 
 	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
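
As a usage sketch (port id and queue counts are placeholders, not taken from
this patch), this is how an application would request the renamed Rx checksum
and KEEP_CRC offloads that enetc advertises above; with KEEP_CRC the 4-byte
FCS stays on the received mbuf, which is why the driver adds RTE_ETHER_CRC_LEN
to crc_len:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: enable Rx checksum offloads plus KEEP_CRC on an assumed port 0. */
static int
configure_rx_offloads(void)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			       RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			       RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
			       RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	/* 1 Rx and 1 Tx queue; real applications size these as needed. */
	return rte_eth_dev_configure(0, 1, 1, &conf);
}
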
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 47bfdac2cfdd..d5493c98345d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -178,7 +178,7 @@ struct enic {
 	 */
 	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
 	uint8_t rss_enable;
-	uint64_t rss_hf; /* ETH_RSS flags */
+	uint64_t rss_hf; /* RTE_ETH_RSS flags */
 	union vnic_rss_key rss_key;
 	union vnic_rss_cpu rss_cpu;
 
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 8df7332bc5e0..c8bdaf1a8e79 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,30 +38,30 @@ static const struct vic_speed_capa {
 	uint16_t sub_devid;
 	uint32_t capa;
 } vic_speed_capa_map[] = {
-	{ 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-	{ 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-	{ 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-	{ 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-	{ 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-	{ 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-	{ 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-	{ 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-	{ 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-	{ 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-	{ 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-	{ 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-	{ 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-	{ 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-	{ 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-	{ 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-	{ 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-	{ 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-	{ 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-	{ 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-	{ 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-	{ 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
 	{ 0, 0 }, /* End marker */
 };
 
@@ -297,8 +297,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	ENICPMD_FUNC_TRACE();
 
 	offloads = eth_dev->data->dev_conf.rxmode.offloads;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			enic->ig_vlan_strip_en = 1;
 		else
 			enic->ig_vlan_strip_en = 0;
@@ -323,17 +323,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	enic->mc_count = 0;
 	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-				  DEV_RX_OFFLOAD_CHECKSUM);
+				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* Include all VLAN offload masks so the current settings are applied */
-	mask = ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = enicpmd_vlan_offload_set(eth_dev, mask);
 	if (ret) {
 		dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -435,14 +435,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
 	}
 	/* 1300 and later models are at least 40G */
 	if (id >= 0x0100)
-		return ETH_LINK_SPEED_40G;
+		return RTE_ETH_LINK_SPEED_40G;
 	/* VFs have subsystem id 0, check device id */
 	if (id == 0) {
 		/* Newer VF implies at least 40G model */
 		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-			return ETH_LINK_SPEED_40G;
+			return RTE_ETH_LINK_SPEED_40G;
 	}
-	return ETH_LINK_SPEED_10G;
+	return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -774,8 +774,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
 				enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -806,8 +806,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
 	 */
 	rss_cpu = enic->rss_cpu;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			rss_cpu.cpu[i / 4].b[i % 4] =
 				enic_rte_rq_idx_to_sop_idx(
@@ -883,7 +883,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 	 */
 	conf->offloads = enic->rx_offload_capa;
 	if (!enic->ig_vlan_strip_en)
-		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -969,8 +969,8 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
 				   struct rte_eth_udp_tunnel *tnl)
 {
-	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
-	    tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
 		return -ENOTSUP;
 	if (!enic->overlay_offload) {
 		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
@@ -1010,7 +1010,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
@@ -1039,7 +1039,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
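
One distinction worth keeping straight here: RTE_ETH_LINK_SPEED_* values (as
in vic_speed_capa_map above) are OR-able capability bits, while
RTE_ETH_SPEED_NUM_* values are plain Mb/s numbers carried in
rte_eth_link.link_speed. A small sketch, with the port id as a placeholder:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_speeds(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	/* speed_capa is a bitmask of RTE_ETH_LINK_SPEED_* capability bits. */
	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
	    (dev_info.speed_capa & RTE_ETH_LINK_SPEED_40G) != 0)
		printf("port %u can negotiate 40G\n", port_id);

	/* link_speed is a number in Mb/s (e.g. RTE_ETH_SPEED_NUM_40G). */
	if (rte_eth_link_get_nowait(port_id, &link) == 0)
		printf("current speed: %u Mb/s\n", link.link_speed);
}
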
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index dfc7f5d1f94f..21b1fffb14f0 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
 
 	memset(&link, 0, sizeof(link));
 	link.link_status = enic_get_link_status(enic);
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = vnic_dev_port_speed(enic->vdev);
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
 	}
 
 	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* vnic notification of link status has already been turned on in
 	 * enic_dev_init() which is called during probe time.  Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
 	 * and vlan insertion are supported.
 	 */
 	simple_tx_offloads = enic->tx_offload_capa &
-		(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_VLAN_INSERT |
-		 DEV_TX_OFFLOAD_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_UDP_CKSUM |
-		 DEV_TX_OFFLOAD_TCP_CKSUM);
+		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if ((eth_dev->data->dev_conf.txmode.offloads &
 	     ~simple_tx_offloads) == 0) {
 		ENICPMD_LOG(DEBUG, " use the simple tx handler");
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
 
 	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_SCATTER) {
+	    RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((max pkt len)/mbuf_size) */
 		mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
@@ -1385,15 +1385,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 	rss_hash_type = 0;
 	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
 	if (enic->rq_count > 1 &&
-	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+	    (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
 	    rss_hf != 0) {
 		rss_enable = 1;
-		if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			      ETH_RSS_NONFRAG_IPV4_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
 			if (enic->udp_rss_weak) {
 				/*
@@ -1404,12 +1404,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
 			}
 		}
-		if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
-			      ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+			      RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
 			if (enic->udp_rss_weak)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -1745,9 +1745,9 @@ enic_enable_overlay_offload(struct enic *enic)
 		return -EINVAL;
 	}
 	enic->tx_offload_capa |=
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		(enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
-		(enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+		(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
 	enic->tx_offload_mask |=
 		PKT_TX_OUTER_IPV6 |
 		PKT_TX_OUTER_IPV4 |
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index c5777772a09e..918a9e170ff6 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -147,31 +147,31 @@ int enic_get_vnic_config(struct enic *enic)
 		 * IPV4 hash type handles both non-frag and frag packet types.
 		 * TCP/UDP is controlled via a separate flag below.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
-			ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+			RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (ENIC_SETTING(enic, RSSHASH_IPV6))
 		/*
 		 * The VIC adapter can perform RSS on IPv6 packets with and
 		 * without extension headers. An IPv6 "fragment" is an IPv6
 		 * packet with the fragment extension header.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
-			ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+			RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
-			ETH_RSS_IPV6_TCP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_IPV6_TCP_EX;
 	if (enic->udp_rss_weak)
 		enic->flow_type_rss_offloads |=
-			ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 
 	/* Zero offloads if RSS is not enabled */
 	if (!ENIC_SETTING(enic, RSS))
@@ -201,19 +201,19 @@ int enic_get_vnic_config(struct enic *enic)
 	enic->tx_queue_offload_capa = 0;
 	enic->tx_offload_capa =
 		enic->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	enic->rx_offload_capa =
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	enic->tx_offload_mask =
 		PKT_TX_IPV6 |
 		PKT_TX_IPV4 |
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index b87c036e6014..82d595b1d1a0 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -17,10 +17,10 @@
 
 const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
 static const struct rte_eth_link eth_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_UP,
-	.link_autoneg = ETH_LINK_AUTONEG,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_UP,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG,
 };
 
 static int
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 602c04033c18..5f4810051dac 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -326,7 +326,7 @@ int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
 	int qid;
 	struct rte_eth_dev *fsdev;
 	struct rxq **rxq;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 				&ETH(sdev)->data->dev_conf.intr_conf;
 
 	fsdev = fs_dev(sdev);
@@ -519,7 +519,7 @@ int
 failsafe_rx_intr_install(struct rte_eth_dev *dev)
 {
 	struct fs_priv *priv = PRIV(dev);
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 			&priv->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 29de39910c6e..a3a8a1c82e3a 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1172,51 +1172,51 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
 	 * configuring a sub-device.
 	 */
 	infos->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->rx_queue_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	infos->flow_type_rss_offloads =
-		ETH_RSS_IP |
-		ETH_RSS_UDP |
-		ETH_RSS_TCP;
+		RTE_ETH_RSS_IP |
+		RTE_ETH_RSS_UDP |
+		RTE_ETH_RSS_TCP;
 	infos->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
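
The capability lists above feed the usual application-side pattern: query the
port, then enable only what the PMD reports. A minimal sketch (the offload
picked here is arbitrary):

#include <rte_ethdev.h>

/* Sketch: request TCP LRO only if the port advertises it. */
static uint64_t
pick_rx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint64_t offloads = 0;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
	return offloads;
}
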
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 17c73c4dc5ae..b7522a47a80b 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -177,7 +177,7 @@ struct fm10k_rx_queue {
 	uint8_t drop_en;
 	uint8_t rx_deferred_start; /* don't start this queue in dev start. */
 	uint16_t rx_ftag_en; /* indicates FTAG RX supported */
-	uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /*
@@ -209,7 +209,7 @@ struct fm10k_tx_queue {
 	uint16_t next_rs; /* Next pos to set RS flag */
 	uint16_t next_dd; /* Next pos to check DD flag */
 	volatile uint32_t *tail_ptr;
-	uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
 	uint16_t nb_desc;
 	uint16_t port_id;
 	uint8_t tx_deferred_start; /** don't start this queue in dev start. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 66f4a5c6df2c..d256334bfde9 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -413,12 +413,12 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
 
 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 
-	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		return 0;
 
 	if (hw->mac.type == fm10k_mac_vf) {
@@ -449,8 +449,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
@@ -510,7 +510,7 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};
 
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
 		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
@@ -547,15 +547,15 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 	 */
 	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	if (mrqc == 0) {
 		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
@@ -602,7 +602,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	if (hw->mac.type != fm10k_mac_pf)
 		return;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		nb_queue_pools = vmdq_conf->nb_queue_pools;
 
 	/* no pool number change, no need to update logic port and VLAN/MAC */
@@ -759,7 +759,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		/* Add dual VLAN tag length to support dual VLAN */
 		if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+			rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1145,7 +1145,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Update default vlan when not in VMDQ mode */
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
 	fm10k_link_update(dev, 0);
@@ -1222,11 +1222,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();
 
-	dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_speed  = RTE_ETH_SPEED_NUM_50G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	dev->data->dev_link.link_status =
-		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+		dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return 0;
 }
@@ -1378,7 +1378,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_vfs            = pdev->max_vfs;
 	dev_info->vmdq_pool_base     = 0;
 	dev_info->vmdq_queue_base    = 0;
-	dev_info->max_vmdq_pools     = ETH_32_POOLS;
+	dev_info->max_vmdq_pools     = RTE_ETH_32_POOLS;
 	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
 	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
@@ -1389,15 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
-	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-					ETH_RSS_IPV6 |
-					ETH_RSS_IPV6_EX |
-					ETH_RSS_NONFRAG_IPV4_TCP |
-					ETH_RSS_NONFRAG_IPV6_TCP |
-					ETH_RSS_IPV6_TCP_EX |
-					ETH_RSS_NONFRAG_IPV4_UDP |
-					ETH_RSS_NONFRAG_IPV6_UDP |
-					ETH_RSS_IPV6_UDP_EX;
+	dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+					RTE_ETH_RSS_IPV6 |
+					RTE_ETH_RSS_IPV6_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+					RTE_ETH_RSS_IPV6_TCP_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+					RTE_ETH_RSS_IPV6_UDP_EX;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1435,9 +1435,9 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+			RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1509,7 +1509,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		return -EINVAL;
 	}
 
-	if (vlan_id > ETH_VLAN_ID_MAX) {
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
 		return -EINVAL;
 	}
@@ -1767,20 +1767,20 @@ static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+	return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
 }
 
 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
-			   DEV_RX_OFFLOAD_VLAN_FILTER |
-			   DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			   DEV_RX_OFFLOAD_UDP_CKSUM   |
-			   DEV_RX_OFFLOAD_TCP_CKSUM   |
-			   DEV_RX_OFFLOAD_HEADER_SPLIT |
-			   DEV_RX_OFFLOAD_RSS_HASH);
+	return  (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH);
 }
 
 static int
@@ -1965,12 +1965,12 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_MULTI_SEGS  |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_TSO);
+	return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO);
 }
 
 static int
@@ -2111,8 +2111,8 @@ fm10k_reta_update(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2160,8 +2160,8 @@ fm10k_reta_query(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2198,15 +2198,15 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	/* If the mapping doesn't fit any supported, return */
 	if (mrqc == 0)
@@ -2243,15 +2243,15 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
 	hf = 0;
-	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV4)     ? RTE_ETH_RSS_IPV4              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6_EX           : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX       : 0;
 
 	rss_conf->rss_hf = hf;
 
@@ -2606,7 +2606,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 
 			/* first clear the internal SW recording structure */
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					false);
 
@@ -2622,7 +2622,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 					MAIN_VSI_POOL_NUMBER);
 
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					true);
 
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 83af01dc2da6..50973a662c67 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -208,11 +208,11 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 	/* without Rx ol_flags, no VP flag report */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 #endif
 
@@ -221,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c
index cb9cf6efa287..80f9eb5c3031 100644
--- a/drivers/net/hinic/base/hinic_pmd_hwdev.c
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -1320,28 +1320,28 @@ hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
 static int hinic_link_event_process(struct hinic_hwdev *hwdev,
 				    struct rte_eth_dev *eth_dev, u8 status)
 {
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 	struct nic_port_info port_info;
 	struct rte_eth_link link;
 	int rc = HINIC_OK;
 
 	if (!status) {
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
 		memset(&port_info, 0, sizeof(port_info));
 		rc = hinic_get_port_info(hwdev, &port_info);
 		if (rc) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
-			link.link_autoneg = ETH_LINK_FIXED;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+			link.link_autoneg = RTE_ETH_LINK_FIXED;
 		} else {
 			link.link_speed = port_speed[port_info.speed %
 						LINK_SPEED_MAX];
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index c2374ebb6759..4cd5a85d5f8d 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -311,8 +311,8 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* mtu size is 256~9600 */
 	if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
@@ -338,7 +338,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 
 	/* init vlan offload */
 	err = hinic_vlan_offload_set(dev,
-				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+				RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
 		(void)hinic_config_mq_mode(dev, FALSE);
@@ -696,15 +696,15 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
 	} else {
 		*speed_capa = 0;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
-			*speed_capa |= ETH_LINK_SPEED_1G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_1G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
-			*speed_capa |= ETH_LINK_SPEED_10G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_10G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
-			*speed_capa |= ETH_LINK_SPEED_25G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_25G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
-			*speed_capa |= ETH_LINK_SPEED_40G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_40G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
-			*speed_capa |= ETH_LINK_SPEED_100G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	}
 }
 
@@ -732,24 +732,24 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 
 	hinic_get_speed_capa(dev, &info->speed_capa);
 	info->rx_queue_offload_capa = 0;
-	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_VLAN_FILTER |
-				DEV_RX_OFFLOAD_SCATTER |
-				DEV_RX_OFFLOAD_TCP_LRO |
-				DEV_RX_OFFLOAD_RSS_HASH;
+	info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				RTE_ETH_RX_OFFLOAD_SCATTER |
+				RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	info->tx_queue_offload_capa = 0;
-	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_UDP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_CKSUM |
-				DEV_TX_OFFLOAD_SCTP_CKSUM |
-				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_TCP_TSO |
-				DEV_TX_OFFLOAD_MULTI_SEGS;
+	info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
 	info->reta_size = HINIC_RSS_INDIR_SIZE;
@@ -846,20 +846,20 @@ static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
 	u8 port_link_status = 0;
 	struct nic_port_info port_link_info;
 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 
 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
 	if (rc)
 		return rc;
 
 	if (!port_link_status) {
-		link->link_status = ETH_LINK_DOWN;
+		link->link_status = RTE_ETH_LINK_DOWN;
 		link->link_speed = 0;
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
-		link->link_autoneg = ETH_LINK_FIXED;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_FIXED;
 		return HINIC_OK;
 	}
 
@@ -901,8 +901,8 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* Get link status information from hardware */
 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
 		if (rc != HINIC_OK) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Get link status failed");
 			goto out;
 		}
@@ -1650,8 +1650,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	int err;
 
 	/* Enable or disable VLAN filter */
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
 			TRUE : FALSE;
 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
@@ -1672,8 +1672,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Enable or disable VLAN stripping */
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
 			TRUE : FALSE;
 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
 		if (err) {
@@ -1859,13 +1859,13 @@ static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
 	fc_conf->autoneg = nic_pause.auto_neg;
 
 	if (nic_pause.tx_pause && nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (nic_pause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else if (nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1879,14 +1879,14 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	nic_pause.auto_neg = fc_conf->autoneg;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		nic_pause.tx_pause = true;
 	else
 		nic_pause.tx_pause = false;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		nic_pause.rx_pause = true;
 	else
 		nic_pause.rx_pause = false;
@@ -1930,7 +1930,7 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err = 0;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_OK;
 	}
@@ -1951,14 +1951,14 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 		}
 	}
 
-	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 
 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
 	if (err) {
@@ -1994,7 +1994,7 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_ERROR;
 	}
@@ -2015,15 +2015,15 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	rss_conf->rss_hf |=  rss_type.ipv4 ?
-		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
 	rss_conf->rss_hf |=  rss_type.ipv6 ?
-		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
-	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+	rss_conf->rss_hf |=  rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
 
 	return HINIC_OK;
 }
@@ -2053,7 +2053,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 	u16 i = 0;
 	u16 idx, shift;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
 		return HINIC_OK;
 
 	if (reta_size != NIC_RSS_INDIR_SIZE) {
@@ -2067,8 +2067,8 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 
 	/* update rss indir_tbl */
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
@@ -2133,8 +2133,8 @@ static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
 	}
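
The idx/shift arithmetic converted in the fm10k and hinic hunks is the
standard addressing scheme for the redirection table: entries are packed 64
per rte_eth_rss_reta_entry64, so RTE_ETH_RETA_GROUP_SIZE splits a flat index
into a group and a bit position. A sketch of the application-side query (the
table size bound is an assumption):

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: read back the RETA; assumes reta_size <= 8 * 64 entries. */
static void
dump_reta(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	/* Mark every entry we want reported. */
	for (i = 0; i < reta_size; i++)
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
	if (rte_eth_dev_rss_reta_query(port_id, reta_conf, reta_size) != 0)
		return;
	for (i = 0; i < reta_size; i++)
		printf("entry %u -> queue %u\n", i,
		       reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta
				[i % RTE_ETH_RETA_GROUP_SIZE]);
}
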
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 842399cc4cd8..d347afe9a6a9 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -504,14 +504,14 @@ static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
 {
 	u64 rss_hf = rss_conf->rss_hf;
 
-	rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 }
 
 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
@@ -588,8 +588,8 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 {
 	int err, i;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 		nic_dev->num_rss = 0;
 		if (nic_dev->num_rq > 1) {
 			/* get rss template id */
@@ -599,7 +599,7 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 				PMD_DRV_LOG(WARNING, "Alloc rss template failed");
 				return err;
 			}
-			nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+			nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
 			for (i = 0; i < nic_dev->num_rq; i++)
 				hinic_add_rq_to_rx_queue_list(nic_dev, i);
 		}
@@ -610,12 +610,12 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 
 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
 {
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (hinic_rss_template_free(nic_dev->hwdev,
 					    nic_dev->rss_tmpl_idx))
 			PMD_DRV_LOG(WARNING, "Free rss template failed");
 
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 	}
 }
 
@@ -641,7 +641,7 @@ int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
 	int ret = 0;
 
 	switch (dev_conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		ret = hinic_config_mq_rx_rss(nic_dev, on);
 		break;
 	default:
@@ -662,7 +662,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	int lro_wqe_num;
 	int buf_size;
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (rss_conf.rss_hf == 0) {
 			rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
 		} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
@@ -678,7 +678,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
 	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
@@ -687,7 +687,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 		goto rx_csum_ofl_err;
 
 	/* config lro */
-	lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+	lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
 			true : false;
 	max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
 	buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
@@ -726,7 +726,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		hinic_rss_deinit(nic_dev);
 		hinic_destroy_num_qps(nic_dev);
 	}
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 8a45f2d9fc50..5c303398b635 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -8,17 +8,17 @@
 #define HINIC_DEFAULT_RX_FREE_THRESH	32
 
 #define HINIC_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |\
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 enum rq_completion_fmt {
 	RQ_COMPLETE_SGE = 1
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 8753c340e790..3d0159d78778 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -1536,7 +1536,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 		if (dcb_rx_conf->nb_tcs == 0)
 			hw->dcb_info.pfc_en = 1; /* tc0 only */
@@ -1693,7 +1693,7 @@ hns3_update_queue_map_configure(struct hns3_adapter *hns)
 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		return 0;
 
 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
@@ -1713,22 +1713,22 @@ static void
 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
 {
 	switch (mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->requested_fc_mode = HNS3_FC_FULL;
 		break;
 	default:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
-			  "configured to RTE_FC_NONE", mode);
+			  "configured to RTE_ETH_FC_NONE", mode);
 		break;
 	}
 }
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 693048f58704..8e0ccecb57a6 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -60,29 +60,29 @@ enum hns3_evt_cause {
 };
 
 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
-	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
 };
@@ -500,8 +500,8 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	struct hns3_cmd_desc desc;
 	int ret;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
 		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
 		return -EINVAL;
 	}
@@ -514,10 +514,10 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
 	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
@@ -725,11 +725,11 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rte_spinlock_lock(&hw->lock);
 	rxmode = &dev->data->dev_conf.rxmode;
 	tmp_mask = (unsigned int)mask;
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* ignore vlan filter configuration during promiscuous mode */
 		if (!dev->data->promiscuous) {
 			/* Enable or disable VLAN filter */
-			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
 				 true : false;
 
 			ret = hns3_enable_vlan_filter(hns, enable);
@@ -742,9 +742,9 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
 		    true : false;
 
 		ret = hns3_en_hw_strip_rxvtag(hns, enable);
@@ -1118,7 +1118,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
 				       RTE_ETHER_TYPE_VLAN);
 	if (ret) {
 		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
@@ -1161,7 +1161,7 @@ hns3_restore_vlan_conf(struct hns3_adapter *hns)
 	if (!hw->data->promiscuous) {
 		/* restore vlan filter states */
 		offloads = hw->data->dev_conf.rxmode.offloads;
-		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
 		ret = hns3_enable_vlan_filter(hns, enable);
 		if (ret) {
 			hns3_err(hw, "failed to restore vlan rx filter conf, "
@@ -1204,7 +1204,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
 			  txmode->hw_vlan_reject_untagged);
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	ret = hns3_vlan_offload_set(dev, mask);
 	if (ret) {
 		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
@@ -2213,9 +2213,9 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 	int max_tc = 0;
 	int i;
 
-	if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
-	    (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
-	     tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
 		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
 			 rx_mq_mode, tx_mq_mode);
 		return -EOPNOTSUPP;
@@ -2223,7 +2223,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
 			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
 				 dcb_rx_conf->nb_tcs, pf->tc_max);
@@ -2232,7 +2232,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
 		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
-			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
 				 "nb_tcs(%d) != %d or %d in rx direction.",
 				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
 			return -EINVAL;
@@ -2400,11 +2400,11 @@ hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
 	 * configure link_speeds (default 0), which means auto-negotiation.
 	 * In this case, it should return success.
 	 */
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
 	    hw->mac.support_autoneg == 0)
 		return 0;
 
-	if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
 		ret = hns3_check_port_speed(hw, link_speeds);
 		if (ret)
 			return ret;
@@ -2464,15 +2464,15 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		goto cfg_err;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_setup_dcb(dev);
 		if (ret)
 			goto cfg_err;
 	}
 
 	/* When RSS is not configured, direct packets to queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		hw->rss_dis_flag = false;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -2493,7 +2493,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -2600,15 +2600,15 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 
 	return speed_capa;
 }
@@ -2619,19 +2619,19 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	return speed_capa;
 }
@@ -2650,7 +2650,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)
 			hns3_get_firber_port_speed_capa(mac->supported_speed);
 
 	if (mac->support_autoneg == 0)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -2676,40 +2676,40 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_KEEP_CRC |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
 	if (hns3_dev_get_support(hw, PTP))
-		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
@@ -2793,7 +2793,7 @@ hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
 
 	ret = hns3_update_link_info(eth_dev);
 	if (ret)
-		hw->mac.link_status = ETH_LINK_DOWN;
+		hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	return ret;
 }
@@ -2806,29 +2806,29 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link->link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link->link_speed = ETH_SPEED_NUM_NONE;
+		new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link->link_duplex = mac->link_duplex;
-	new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link->link_autoneg = mac->link_autoneg;
 }
 
@@ -2848,8 +2848,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 	if (eth_dev->data->dev_started == 0) {
 		new_link.link_autoneg = mac->link_autoneg;
 		new_link.link_duplex = mac->link_duplex;
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
-		new_link.link_status = ETH_LINK_DOWN;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		new_link.link_status = RTE_ETH_LINK_DOWN;
 		goto out;
 	}
 
@@ -2861,7 +2861,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 			break;
 		}
 
-		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+		if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
 			break;
 
 		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
@@ -3207,31 +3207,31 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
 {
 	switch (speed_cmd) {
 	case HNS3_CFG_SPEED_10M:
-		*speed = ETH_SPEED_NUM_10M;
+		*speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case HNS3_CFG_SPEED_100M:
-		*speed = ETH_SPEED_NUM_100M;
+		*speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HNS3_CFG_SPEED_1G:
-		*speed = ETH_SPEED_NUM_1G;
+		*speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HNS3_CFG_SPEED_10G:
-		*speed = ETH_SPEED_NUM_10G;
+		*speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HNS3_CFG_SPEED_25G:
-		*speed = ETH_SPEED_NUM_25G;
+		*speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HNS3_CFG_SPEED_40G:
-		*speed = ETH_SPEED_NUM_40G;
+		*speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HNS3_CFG_SPEED_50G:
-		*speed = ETH_SPEED_NUM_50G;
+		*speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HNS3_CFG_SPEED_100G:
-		*speed = ETH_SPEED_NUM_100G;
+		*speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HNS3_CFG_SPEED_200G:
-		*speed = ETH_SPEED_NUM_200G;
+		*speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	default:
 		return -EINVAL;
@@ -3559,39 +3559,39 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
 	hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
 
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
 		break;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
 		break;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
 		break;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
 		break;
@@ -4254,14 +4254,14 @@ hns3_mac_init(struct hns3_hw *hw)
 	int ret;
 
 	pf->support_sfp_query = true;
-	mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+	mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
 		return ret;
 	}
 
-	mac->link_status = ETH_LINK_DOWN;
+	mac->link_status = RTE_ETH_LINK_DOWN;
 
 	return hns3_config_mtu(hw, pf->mps);
 }
@@ -4511,7 +4511,7 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	 * all packets come in in the receiving direction.
 	 */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, false);
 		if (ret) {
 			hns3_err(hw, "failed to enable promiscuous mode due to "
@@ -4552,7 +4552,7 @@ hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	}
 	/* when promiscuous mode was disabled, restore the vlan filter status */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, true);
 		if (ret) {
 			hns3_err(hw, "failed to disable promiscuous mode due to"
@@ -4672,8 +4672,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 		mac_info->supported_speed =
 					rte_le_to_cpu_32(resp->supported_speed);
 		mac_info->support_autoneg = resp->autoneg_ability;
-		mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
-					: ETH_LINK_AUTONEG;
+		mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+					: RTE_ETH_LINK_AUTONEG;
 	} else {
 		mac_info->query_type = HNS3_DEFAULT_QUERY;
 	}
@@ -4684,8 +4684,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 static uint8_t
 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
 {
-	if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
-		duplex = ETH_LINK_FULL_DUPLEX;
+	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return duplex;
 }
@@ -4735,7 +4735,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 		return ret;
 
 	/* Do nothing if no SFP */
-	if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
 		return 0;
 
 	/*
@@ -4762,7 +4762,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 
 	/* Config full duplex for SFP */
 	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
-				      ETH_LINK_FULL_DUPLEX);
+				      RTE_ETH_LINK_FULL_DUPLEX);
 }
 
 static void
@@ -4881,10 +4881,10 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
 	hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
 
 	/*
-	 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+	 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
 	 * when receiving frames. Otherwise, CRC will be stripped.
 	 */
-	if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
 	else
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
@@ -4912,7 +4912,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret) {
 		hns3_err(hw, "get link status cmd failed %d", ret);
-		return ETH_LINK_DOWN;
+		return RTE_ETH_LINK_DOWN;
 	}
 
 	req = (struct hns3_link_status_cmd *)desc.data;
@@ -5094,19 +5094,19 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		return HNS3_FIBER_LINK_SPEED_1G_BIT;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		return HNS3_FIBER_LINK_SPEED_10G_BIT;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		return HNS3_FIBER_LINK_SPEED_25G_BIT;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		return HNS3_FIBER_LINK_SPEED_40G_BIT;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		return HNS3_FIBER_LINK_SPEED_50G_BIT;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		return HNS3_FIBER_LINK_SPEED_100G_BIT;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		return HNS3_FIBER_LINK_SPEED_200G_BIT;
 	default:
 		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
@@ -5344,20 +5344,20 @@ hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_10M:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_10M:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
 		break;
-	case ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
 		break;
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
 		break;
 	default:
@@ -5373,26 +5373,26 @@ hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_1G:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
 		break;
 	default:
@@ -5427,28 +5427,28 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
 static inline uint32_t
 hns3_get_link_speed(uint32_t link_speeds)
 {
-	uint32_t speed = ETH_SPEED_NUM_NONE;
-
-	if (link_speeds & ETH_LINK_SPEED_10M ||
-	    link_speeds & ETH_LINK_SPEED_10M_HD)
-		speed = ETH_SPEED_NUM_10M;
-	if (link_speeds & ETH_LINK_SPEED_100M ||
-	    link_speeds & ETH_LINK_SPEED_100M_HD)
-		speed = ETH_SPEED_NUM_100M;
-	if (link_speeds & ETH_LINK_SPEED_1G)
-		speed = ETH_SPEED_NUM_1G;
-	if (link_speeds & ETH_LINK_SPEED_10G)
-		speed = ETH_SPEED_NUM_10G;
-	if (link_speeds & ETH_LINK_SPEED_25G)
-		speed = ETH_SPEED_NUM_25G;
-	if (link_speeds & ETH_LINK_SPEED_40G)
-		speed = ETH_SPEED_NUM_40G;
-	if (link_speeds & ETH_LINK_SPEED_50G)
-		speed = ETH_SPEED_NUM_50G;
-	if (link_speeds & ETH_LINK_SPEED_100G)
-		speed = ETH_SPEED_NUM_100G;
-	if (link_speeds & ETH_LINK_SPEED_200G)
-		speed = ETH_SPEED_NUM_200G;
+	uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+	if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+		speed = RTE_ETH_SPEED_NUM_10M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+		speed = RTE_ETH_SPEED_NUM_100M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+		speed = RTE_ETH_SPEED_NUM_1G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+		speed = RTE_ETH_SPEED_NUM_10G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+		speed = RTE_ETH_SPEED_NUM_25G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+		speed = RTE_ETH_SPEED_NUM_40G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+		speed = RTE_ETH_SPEED_NUM_50G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+		speed = RTE_ETH_SPEED_NUM_100G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+		speed = RTE_ETH_SPEED_NUM_200G;
 
 	return speed;
 }
@@ -5456,11 +5456,11 @@ hns3_get_link_speed(uint32_t link_speeds)
 static uint8_t
 hns3_get_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-	    (link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+	    (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 static int
@@ -5594,9 +5594,9 @@ hns3_apply_link_speed(struct hns3_hw *hw)
 	struct hns3_set_link_speed_cfg cfg;
 
 	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
-	cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
-			ETH_LINK_AUTONEG : ETH_LINK_FIXED;
-	if (cfg.autoneg != ETH_LINK_AUTONEG) {
+	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+			RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
 		cfg.speed = hns3_get_link_speed(conf->link_speeds);
 		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
 	}
@@ -5869,7 +5869,7 @@ hns3_do_stop(struct hns3_adapter *hns)
 	ret = hns3_cfg_mac_mode(hw, false);
 	if (ret)
 		return ret;
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
@@ -6080,17 +6080,17 @@ hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	current_mode = hns3_get_current_fc_mode(dev);
 	switch (current_mode) {
 	case HNS3_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case HNS3_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HNS3_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case HNS3_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	}
 
@@ -6236,7 +6236,7 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	int i;
 
 	rte_spinlock_lock(&hw->lock);
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = pf->local_max_tc;
 	else
 		dcb_info->nb_tcs = 1;
@@ -6536,7 +6536,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 		hns3_update_linkstatus_and_event(hw, false);
@@ -6826,7 +6826,7 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
 	 * on devices with a link speed
 	 * below 10 Gbps.
 	 */
-	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
 		*state = 0;
 		return 0;
 	}
@@ -6858,7 +6858,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
 	 * configured FEC mode is returned.
 	 * If link is up, current FEC mode is returned.
 	 */
-	if (hw->mac.link_status == ETH_LINK_DOWN) {
+	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
 		ret = get_current_fec_auto_state(hw, &auto_state);
 		if (ret)
 			return ret;
@@ -6957,12 +6957,12 @@ get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
 	uint32_t cur_capa;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		cur_capa = fec_capa[1].capa;
 		break;
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		cur_capa = fec_capa[0].capa;
 		break;
 	default:
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e28056b1bd60..0f55fd4c83ad 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -190,10 +190,10 @@ struct hns3_mac {
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 	uint8_t media_type;
 	uint8_t phy_addr;
-	uint8_t link_duplex  : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
-	uint8_t link_status  : 1; /* ETH_LINK_[DOWN/UP] */
-	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
+	uint8_t link_duplex  : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint8_t link_status  : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;      /* RTE_ETH_SPEED_NUM_ */
 	/*
 	 * Some firmware versions support only the SFP speed query. In addition
 	 * to the SFP speed query, some firmware supports the query of the speed
@@ -1076,9 +1076,9 @@ static inline uint64_t
 hns3_txvlan_cap_get(struct hns3_hw *hw)
 {
 	if (hw->port_base_vlan_cfg.state)
-		return DEV_TX_OFFLOAD_VLAN_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	else
-		return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 }
 
 #endif /* _HNS3_ETHDEV_H_ */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 54dbd4b798f2..7b784048b518 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -807,15 +807,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	hw->adapter_state = HNS3_NIC_CONFIGURING;
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
 		ret = -EINVAL;
 		goto cfg_err;
 	}
 
 	/* When RSS is not configured, direct packets to queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -832,7 +832,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -935,32 +935,32 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1640,10 +1640,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	tmp_mask = (unsigned int)mask;
 
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN filter */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = hns3vf_en_vlan_filter(hw, true);
 		else
 			ret = hns3vf_en_vlan_filter(hw, false);
@@ -1653,10 +1653,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Vlan stripping setting */
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
 		else
 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1724,7 +1724,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
 	int ret;
 
 	dev_conf = &hw->data->dev_conf;
-	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
 								   : false;
 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
 	if (ret)
@@ -1749,8 +1749,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
 	}
 
 	/* Apply vlan offload setting */
-	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK);
+	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
 
@@ -2059,7 +2059,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	/*
 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2218,31 +2218,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link.link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link.link_duplex = mac->link_duplex;
-	new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -2570,11 +2570,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 		 * Make sure to update the link status before hns3vf_stop_poll_job
 		 * because updating the link status depends on the polling job.
 		 */
-		hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
 					  hw->mac.link_duplex);
 		hns3vf_stop_poll_job(eth_dev);
 	}
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
 	rte_wmb();
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 38a2ee58a651..da6918fddda3 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1298,10 +1298,10 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
 	 * Kunpeng930 and future Kunpeng series support using the src/dst port
 	 * fields in the RSS hash for the IPv6 SCTP packet type.
 	 */
-	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	    (rss->types & ETH_RSS_IP ||
+	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+	    (rss->types & RTE_ETH_RSS_IP ||
 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
-	    rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 		return false;
 
 	return true;
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
index 5dfe68cc4dbd..9a829d7011ad 100644
--- a/drivers/net/hns3/hns3_ptp.c
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -21,7 +21,7 @@ hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		return 0;
 
 	ret = rte_mbuf_dyn_rx_timestamp_register
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index 3a81e90e0911..85495bbe89d9 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -76,69 +76,69 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_tuple_table[] = {
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
 };
 
@@ -146,44 +146,44 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_rss_types[] = {
-	{ ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+	{ RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+	{ RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
 };
@@ -365,10 +365,10 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
 	 * When the user does not specify the following types, or a combination
 	 * of them, all fields are enabled for the supported RSS types. The
 	 * following types are:
-	 * - ETH_RSS_L3_SRC_ONLY
-	 * - ETH_RSS_L3_DST_ONLY
-	 * - ETH_RSS_L4_SRC_ONLY
-	 * - ETH_RSS_L4_DST_ONLY
+	 * - RTE_ETH_RSS_L3_SRC_ONLY
+	 * - RTE_ETH_RSS_L3_DST_ONLY
+	 * - RTE_ETH_RSS_L4_SRC_ONLY
+	 * - RTE_ETH_RSS_L4_DST_ONLY
 	 */
 	if (fields_count == 0) {
 		for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
@@ -520,8 +520,8 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
 	memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl,
 	       sizeof(rss_cfg->rss_indirection_tbl));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
 			rte_spinlock_unlock(&hw->lock);
 			hns3_err(hw, "queue id(%u) set to redirection table "
@@ -572,8 +572,8 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	rte_spinlock_lock(&hw->lock);
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] =
 						rss_cfg->rss_indirection_tbl[i];
@@ -692,7 +692,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	}
 
 	/* When RSS is off, direct packets to queue 0 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
 		hns3_rss_uninit(hns);
 
 	/* Configure RSS hash algorithm and hash key offset */
@@ -709,7 +709,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	 * When RSS is off, there is no need to configure the RSS redirection
 	 * table in hardware.
 	 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
 					       hw->rss_ind_tbl_size);
 		if (ret)
@@ -723,7 +723,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	return ret;
 
 rss_indir_table_uninit:
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret1 = hns3_rss_reset_indir_table(hw);
 		if (ret1 != 0)
 			return ret;
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 996083b88b25..6f153a1b7bfb 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -8,20 +8,20 @@
 #include <rte_flow.h>
 
 #define HNS3_ETH_RSS_SUPPORT ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L3_SRC_ONLY | \
-	ETH_RSS_L3_DST_ONLY | \
-	ETH_RSS_L4_SRC_ONLY | \
-	ETH_RSS_L4_DST_ONLY)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L3_SRC_ONLY | \
+	RTE_ETH_RSS_L3_DST_ONLY | \
+	RTE_ETH_RSS_L4_SRC_ONLY | \
+	RTE_ETH_RSS_L4_DST_ONLY)
 
 #define HNS3_RSS_IND_TBL_SIZE	512 /* The size of hash lookup table */
 #define HNS3_RSS_IND_TBL_SIZE_MAX 2048
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 602548a4f25b..920ee8ceeab9 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1924,7 +1924,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* The CRC length set here is used to amend the packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1969,7 +1969,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
 						 rxq->rx_buf_len);
 	}
 
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 	    dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
 		dev->data->scattered_rx = true;
 }
@@ -2845,7 +2845,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = !dev->data->scattered_rx &&
-			 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+			 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
 
 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_recv_pkts_vec;
@@ -3139,7 +3139,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
 	int ret;
 
 	offloads = hw->data->dev_conf.rxmode.offloads;
-	gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
@@ -4291,7 +4291,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return false;
 
-	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
 }
 
 static bool
@@ -4303,16 +4303,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 	return true;
 #else
 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
-		DEV_TX_OFFLOAD_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_CKSUM | \
-		DEV_TX_OFFLOAD_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_SCTP_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
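
For reference, the driver mask above mirrors what an application requests
through dev_conf.txmode.offloads. A minimal application-side sketch under
the new names (port_id and the 1/1 queue counts are assumptions):

#include <rte_ethdev.h>

/* Sketch: request checksum + TSO Tx offloads with the renamed flags,
 * so tx_prepare-based drivers such as hns3 take the full-feature path. */
static int
configure_tx_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			       RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			       RTE_ETH_TX_OFFLOAD_TCP_TSO;
	/* one Rx and one Tx queue, purely for illustration */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}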
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index c8229e9076b5..dfea5d5b4c2f 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -307,7 +307,7 @@ struct hns3_rx_queue {
 	uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
 	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */
 
-	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+	/* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
 	/*
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index ff434d2d33ed..455110361aac 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -22,8 +22,8 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
-	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
-	if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	/* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		return -ENOTSUP;
 
 	return 0;
@@ -228,10 +228,10 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
 int
 hns3_rx_check_vec_support(struct rte_eth_dev *dev)
 {
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
-				 DEV_RX_OFFLOAD_VLAN;
+	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				 RTE_ETH_RX_OFFLOAD_VLAN;
 
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	if (hns3_dev_get_support(hw, PTP))
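
Since this vector check refuses LRO and the VLAN offload group, an
application that wants to stay on the vector path can intersect its
requested offloads with what the port advertises first; a hedged helper
sketch (the caller-supplied wish list is an assumption):

#include <rte_ethdev.h>

/* Sketch: keep only the Rx offload bits the port actually reports. */
static uint64_t
usable_rx_offloads(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	return wanted & dev_info.rx_offload_capa;
}

Called with e.g. RTE_ETH_RX_OFFLOAD_KEEP_CRC | RTE_ETH_RX_OFFLOAD_RSS_HASH
before rte_eth_dev_configure(), this drops anything missing from
rx_offload_capa.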
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0a4db0891d4a..293df887bf7c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1629,7 +1629,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 
 	/* Set the global registers with default ether type value */
 	if (!pf->support_multi_driver) {
-		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					 RTE_ETHER_TYPE_VLAN);
 		if (ret != I40E_SUCCESS) {
 			PMD_INIT_LOG(ERR,
@@ -1896,8 +1896,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Only legacy filter API needs the following fdir config. So when the
 	 * legacy filter API is deprecated, the following codes should also be
@@ -1931,13 +1931,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	 *  number, which will be available after rx_queue_setup(). dev_start()
 	 *  function is good to place RSS setup.
 	 */
-	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
 		ret = i40e_vmdq_setup(dev);
 		if (ret)
 			goto err;
 	}
 
-	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = i40e_dcb_setup(dev);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
@@ -2214,17 +2214,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 {
 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
 
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed |= I40E_LINK_SPEED_40GB;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed |= I40E_LINK_SPEED_25GB;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed |= I40E_LINK_SPEED_20GB;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed |= I40E_LINK_SPEED_10GB;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed |= I40E_LINK_SPEED_1GB;
-	if (link_speeds & ETH_LINK_SPEED_100M)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 		link_speed |= I40E_LINK_SPEED_100MB;
 
 	return link_speed;
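
Worth keeping the two renamed families apart in hunks like this:
RTE_ETH_LINK_SPEED_* are capability bits OR-ed into dev_conf.link_speeds,
while RTE_ETH_SPEED_NUM_* (further down) are plain Mbps values reported in
rte_eth_link. A sketch forcing a fixed link (the 10G choice is an
assumption):

#include <rte_ethdev.h>

/* Sketch: disable autonegotiation and pin the port at 10G; conf is
 * then passed to rte_eth_dev_configure(). */
static void
set_fixed_10g(struct rte_eth_conf *conf)
{
	conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			    RTE_ETH_LINK_SPEED_10G;
}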
@@ -2332,13 +2332,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
 		     I40E_AQ_PHY_LINK_ENABLED;
 
-	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
-		conf->link_speeds = ETH_LINK_SPEED_40G |
-				    ETH_LINK_SPEED_25G |
-				    ETH_LINK_SPEED_20G |
-				    ETH_LINK_SPEED_10G |
-				    ETH_LINK_SPEED_1G |
-				    ETH_LINK_SPEED_100M;
+	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+				    RTE_ETH_LINK_SPEED_25G |
+				    RTE_ETH_LINK_SPEED_20G |
+				    RTE_ETH_LINK_SPEED_10G |
+				    RTE_ETH_LINK_SPEED_1G |
+				    RTE_ETH_LINK_SPEED_100M;
 
 		abilities |= I40E_AQ_PHY_AN_ENABLED;
 	} else {
@@ -2876,34 +2876,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	/* Parse the link status */
 	switch (link_speed) {
 	case I40E_REG_SPEED_0:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_REG_SPEED_1:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_REG_SPEED_2:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_REG_SPEED_3:
 		if (hw->mac.type == I40E_MAC_X722) {
-			link->link_speed = ETH_SPEED_NUM_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		} else {
 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
 
 			if (reg_val & I40E_REG_MACC_25GB)
-				link->link_speed = ETH_SPEED_NUM_25G;
+				link->link_speed = RTE_ETH_SPEED_NUM_25G;
 			else
-				link->link_speed = ETH_SPEED_NUM_40G;
+				link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		}
 		break;
 	case I40E_REG_SPEED_4:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
-			link->link_speed = ETH_SPEED_NUM_20G;
+			link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2930,8 +2930,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 		status = i40e_aq_get_link_info(hw, enable_lse,
 						&link_status, NULL);
 		if (unlikely(status != I40E_SUCCESS)) {
-			link->link_speed = ETH_SPEED_NUM_NONE;
-			link->link_duplex = ETH_LINK_FULL_DUPLEX;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			return;
 		}
@@ -2946,28 +2946,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		link->link_speed = ETH_SPEED_NUM_20G;
+		link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (link->link_status)
-			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			link->link_speed = ETH_SPEED_NUM_NONE;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 }
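
On the query side, the renamed numeric constants come back through
rte_eth_link; a small application-side sketch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: print the negotiated speed using the renamed constants. */
static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	if (link.link_speed == RTE_ETH_SPEED_NUM_UNKNOWN)
		printf("port %u: speed unknown\n", port_id);
	else
		printf("port %u: %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
}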
@@ -2984,9 +2984,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	memset(&link, 0, sizeof(link));
 
 	/* i40e uses full duplex only */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	if (!wait_to_complete && !enable_lse)
 		update_link_reg(hw, &link);
@@ -3720,33 +3720,33 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -3805,7 +3805,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
 		/* For XL710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_40G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
 		dev_info->default_rxportconf.nb_queues = 2;
 		dev_info->default_txportconf.nb_queues = 2;
 		if (dev->data->nb_rx_queues == 1)
@@ -3819,17 +3819,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
 		/* For XXV710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_25G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
 		dev_info->default_rxportconf.ring_size = 256;
 		dev_info->default_txportconf.ring_size = 256;
 	} else {
 		/* For X710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
-		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
 			dev_info->default_rxportconf.ring_size = 512;
 			dev_info->default_txportconf.ring_size = 256;
 		} else {
@@ -3868,7 +3868,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
 	int ret;
 
 	if (qinq) {
-		if (vlan_type == ETH_VLAN_TYPE_OUTER)
+		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 			reg_id = 2;
 	}
 
@@ -3915,12 +3915,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
-	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -3934,12 +3934,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	/* 802.1ad frames ability is added in NVM API 1.7*/
 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
 		if (qinq) {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->first_tag = rte_cpu_to_le_16(tpid);
-			else if (vlan_type == ETH_VLAN_TYPE_INNER)
+			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		} else {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		}
 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
@@ -3998,37 +3998,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					   RTE_ETHER_TYPE_VLAN);
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					   RTE_ETHER_TYPE_VLAN);
 		}
 		else
 			i40e_vsi_config_double_vlan(vsi, FALSE);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
 		/* Enable or disable outer VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
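
The same renamed mask constants are what applications pass through the
public API into callbacks like this one; a sketch toggling stripping at
runtime (a minimal sketch, assuming the port is already configured):

#include <rte_ethdev.h>

/* Sketch: enable VLAN stripping via the renamed offload flag names. */
static int
enable_vlan_strip(uint16_t port_id)
{
	int cur = rte_eth_dev_get_vlan_offload(port_id);

	if (cur < 0)
		return cur;
	cur |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, cur);
}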
@@ -4111,17 +4111,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	 /* Return current mode according to actual setting*/
 	switch (hw->fc.current_mode) {
 	case I40E_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case I40E_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case I40E_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case I40E_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	};
 
 	return 0;
@@ -4137,10 +4137,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	struct i40e_hw *hw;
 	struct i40e_pf *pf;
 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
-		[RTE_FC_NONE] = I40E_FC_NONE,
-		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
-		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
-		[RTE_FC_FULL] = I40E_FC_FULL
+		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
+		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+		[RTE_ETH_FC_FULL] = I40E_FC_FULL
 	};
 
 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
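
RTE_FC_* becoming RTE_ETH_FC_* also touches the public flow-control API
that feeds this mapping table; a sketch requesting full pause in both
directions (reusing the current watermarks rather than inventing new
values):

#include <rte_ethdev.h>

/* Sketch: switch the port to full (Rx+Tx) pause-frame flow control. */
static int
enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}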
@@ -4287,7 +4287,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4440,7 +4440,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4456,8 +4456,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
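
RTE_RETA_GROUP_SIZE -> RTE_ETH_RETA_GROUP_SIZE shows up in applications
exactly the way it does in the driver loop above; a sketch spreading a
redirection table round-robin over the configured queues (reta_size is
assumed to match what dev_info reports, at most 512 here):

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: round-robin the RETA over nb_queues Rx queues. */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	if (reta_size > RTE_ETH_RSS_RETA_SIZE_512 || nb_queues == 0)
		return -EINVAL;
	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= 1ULL << shift;
		reta[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}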
@@ -4483,7 +4483,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4500,8 +4500,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -4818,7 +4818,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
 				hw->func_caps.num_vsis - vsi_count);
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-				ETH_64_POOLS);
+				RTE_ETH_64_POOLS);
 			if (pf->max_nb_vmdq_vsi) {
 				pf->flags |= I40E_FLAG_VMDQ;
 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -6104,10 +6104,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	int mask = 0;
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK |
-	       ETH_QINQ_STRIP_MASK |
-	       ETH_VLAN_FILTER_MASK |
-	       ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+	       RTE_ETH_QINQ_STRIP_MASK |
+	       RTE_ETH_VLAN_FILTER_MASK |
+	       RTE_ETH_VLAN_EXTEND_MASK;
 	ret = i40e_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6236,9 +6236,9 @@ i40e_pf_setup(struct i40e_pf *pf)
 
 	/* Configure filter control */
 	memset(&settings, 0, sizeof(settings));
-	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
-	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 	else {
 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -7098,7 +7098,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
 {
 	uint32_t vid_idx, vid_bit;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return 0;
 
 	vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7133,7 +7133,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
 	int ret;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return;
 
 	i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -7727,25 +7727,25 @@ static int
 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
 {
 	switch (filter_type) {
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_OIP:
+	case RTE_ETH_TUNNEL_FILTER_OIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
 		break;
-	case ETH_TUNNEL_FILTER_IIP:
+	case RTE_ETH_TUNNEL_FILTER_IIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 		break;
 	default:
@@ -8711,16 +8711,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
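
The tunnel-type enum is likewise application-facing; a sketch registering
the standard VXLAN port (4789 is the IANA default, not something this
patch mandates):

#include <rte_ethdev.h>

/* Sketch: tell the port to treat UDP/4789 as VXLAN. */
static int
add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}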
@@ -8746,12 +8746,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8843,7 +8843,7 @@ int
 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->adapter->hw;
-	uint8_t lut[ETH_RSS_RETA_SIZE_512];
+	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 	uint32_t i;
 	int num;
 
@@ -8851,7 +8851,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 	 * configured. It's necessary to calculate the actual PF
 	 * queues that are configured.
 	 */
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		num = i40e_pf_calc_configured_queues_num(pf);
 	else
 		num = pf->dev_data->nb_rx_queues;
@@ -8930,7 +8930,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
-	    !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return 0;
 
 	hw = I40E_PF_TO_HW(pf);
@@ -10267,16 +10267,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_25G:
 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
 		break;
@@ -10504,7 +10504,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
 	else
 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
 
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_cfg->pfc.willing = 0;
 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 		dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11012,7 +11012,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint16_t bsf, tc_mapping;
 	int i, j = 0;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
 	else
 		dcb_info->nb_tcs = 1;
@@ -11060,7 +11060,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
 		}
 		j++;
-	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
 	return 0;
 }
 
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1d57b9617e66..d8042abbd9be 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -147,17 +147,17 @@ enum i40e_flxpld_layer_idx {
 		       I40E_FLAG_RSS_AQ_CAPABLE)
 
 #define I40E_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /* All bits of RSS hash enable for X722*/
 #define I40E_RSS_HENA_ALL_X722 ( \
@@ -1063,7 +1063,7 @@ struct i40e_rte_flow_rss_conf {
 	uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
 		     I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		    sizeof(uint32_t)];		/**< Hash key. */
-	uint16_t queue[ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
+	uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
 
 	bool symmetric_enable;		/**< true, if enable symmetric */
 	uint64_t config_pctypes;	/**< All PCTYPES with the flow  */
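
The I40E_RSS_OFFLOAD_ALL mask above is what the PMD advertises as
flow_type_rss_offloads; from the application side the same RTE_ETH_RSS_*
bits go into rte_eth_rss_conf, e.g.:

#include <rte_ethdev.h>

/* Sketch: hash on IPv4/IPv6 TCP and UDP only, keeping the current key. */
static int
set_rss_types(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* NULL key: PMD keeps the current one */
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}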
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e41a84f1d737..9acaa1875105 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2015,7 +2015,7 @@ i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_VLAN_EXTEND;
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
@@ -3601,13 +3601,13 @@ i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
 }
 
 static uint16_t i40e_supported_tunnel_filter_types[] = {
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
-	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IMAC,
-	ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
+	RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC,
 };
 
 static int
@@ -3697,12 +3697,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 			break;
@@ -3724,7 +3724,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -3798,7 +3798,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					   vxlan_spec->vni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			vxlan_flag = 1;
@@ -3927,12 +3927,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 
@@ -3955,7 +3955,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -4050,7 +4050,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					   nvgre_spec->tni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			nvgre_flag = 1;
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
index 5da3d187076e..8962e9d97aa7 100644
--- a/drivers/net/i40e/i40e_hash.c
+++ b/drivers/net/i40e/i40e_hash.c
@@ -105,47 +105,47 @@ struct i40e_hash_map_rss_inset {
 
 const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
 	/* IPv4 */
-	{ ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
-	{ ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* IPv6 */
-	{ ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
-	{ ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* Port */
-	{ ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+	{ RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
 	/* Ether */
-	{ ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
-	{ ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+	{ RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+	{ RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
 
 	/* VLAN */
-	{ ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
-	{ ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+	{ RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+	{ RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
 };
 
 #define I40E_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
@@ -208,30 +208,30 @@ struct i40e_hash_match_pattern {
 #define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
 	pattern, rss_mask, true, cus_pctype }
 
-#define I40E_HASH_L2_RSS_MASK		(ETH_RSS_VLAN | ETH_RSS_ETH | \
-					ETH_RSS_L2_SRC_ONLY | \
-					ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK		(RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+					RTE_ETH_RSS_L2_SRC_ONLY | \
+					RTE_ETH_RSS_L2_DST_ONLY)
 
 #define I40E_HASH_L23_RSS_MASK		(I40E_HASH_L2_RSS_MASK | \
-					ETH_RSS_L3_SRC_ONLY | \
-					ETH_RSS_L3_DST_ONLY)
+					RTE_ETH_RSS_L3_SRC_ONLY | \
+					RTE_ETH_RSS_L3_DST_ONLY)
 
-#define I40E_HASH_IPV4_L23_RSS_MASK	(ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK	(ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK	(RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK	(RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
 
 #define I40E_HASH_L234_RSS_MASK		(I40E_HASH_L23_RSS_MASK | \
-					ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
-					ETH_RSS_L4_DST_ONLY)
+					RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+					RTE_ETH_RSS_L4_DST_ONLY)
 
-#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
 
-#define I40E_HASH_L4_TYPES		(ETH_RSS_NONFRAG_IPV4_TCP | \
-					ETH_RSS_NONFRAG_IPV4_UDP | \
-					ETH_RSS_NONFRAG_IPV4_SCTP | \
-					ETH_RSS_NONFRAG_IPV6_TCP | \
-					ETH_RSS_NONFRAG_IPV6_UDP | \
-					ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES		(RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* Current supported patterns and RSS types.
  * All items that have the same pattern types are together.
@@ -239,72 +239,72 @@ struct i40e_hash_match_pattern {
 static const struct i40e_hash_match_pattern match_patterns[] = {
 	/* Ether */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
-			      ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+			      RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
 			      I40E_FILTER_PCTYPE_L2_PAYLOAD),
 
 	/* IPv4 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV4),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_NONFRAG_IPV4_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
 			      I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
-			      ETH_RSS_NONFRAG_IPV4_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
-			      ETH_RSS_NONFRAG_IPV4_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_UDP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
-			      ETH_RSS_NONFRAG_IPV4_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
 
 	/* IPv6 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_NONFRAG_IPV6_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
 			      I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_FRAG,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
-			      ETH_RSS_NONFRAG_IPV6_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_TCP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
-			      ETH_RSS_NONFRAG_IPV6_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_UDP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
-			      ETH_RSS_NONFRAG_IPV6_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
 
 	/* ESP */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
 
 	/* GTPC */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
@@ -319,27 +319,27 @@ static const struct i40e_hash_match_pattern match_patterns[] = {
 				  I40E_HASH_IPV4_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
 				  I40E_HASH_IPV6_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 
 	/* L2TPV3 */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
 
 	/* AH */
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV4),
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV6),
 };
 
@@ -575,29 +575,29 @@ i40e_hash_get_inset(uint64_t rss_types)
 	/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
 	 * it is the same case as none of them are added.
 	 */
-	mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
-	if (mask == ETH_RSS_L2_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
 		inset &= ~I40E_INSET_DMAC;
-	else if (mask == ETH_RSS_L2_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
 		inset &= ~I40E_INSET_SMAC;
 
-	mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
-	if (mask == ETH_RSS_L3_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
 		inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
-	else if (mask == ETH_RSS_L3_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
 		inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
 
-	mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-	if (mask == ETH_RSS_L4_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
 		inset &= ~I40E_INSET_DST_PORT;
-	else if (mask == ETH_RSS_L4_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
 		inset &= ~I40E_INSET_SRC_PORT;
 
 	if (rss_types & I40E_HASH_L4_TYPES) {
 		uint64_t l3_mask = rss_types &
-				   (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+				   (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 		uint64_t l4_mask = rss_types &
-				   (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+				   (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 		if (l3_mask && !l4_mask)
 			inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
@@ -836,7 +836,7 @@ i40e_hash_config(struct i40e_pf *pf,
 
 	/* Update lookup table */
 	if (rss_info->queue_num > 0) {
-		uint8_t lut[ETH_RSS_RETA_SIZE_512];
+		uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 		uint32_t i, j = 0;
 
 		for (i = 0; i < hw->func_caps.rss_table_size; i++) {
@@ -943,7 +943,7 @@ i40e_hash_parse_queues(const struct rte_eth_dev *dev,
 			    "RSS key is ignored when queues specified");
 
 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		max_queue = i40e_pf_calc_configured_queues_num(pf);
 	else
 		max_queue = pf->dev_data->nb_rx_queues;
@@ -1081,22 +1081,22 @@ i40e_hash_validate_rss_types(uint64_t rss_types)
 	uint64_t type, mask;
 
 	/* Validate L2 */
-	type = ETH_RSS_ETH & rss_types;
-	mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+	type = RTE_ETH_RSS_ETH & rss_types;
+	mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L3 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-	       ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
-	       ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
-	mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+	       RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+	       RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+	mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L4 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
-	mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+	mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
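
i40e_hash.c consumes these bits from the rte_flow RSS action; a hedged
sketch of the types value for hashing IPv4/TCP on source address and
source port only (just the types field, pattern and queue list omitted):

#include <rte_ethdev.h>

/* Sketch: RSS types for an rte_flow RSS action -- IPv4/TCP hashed on
 * the source address and source port only. */
static const uint64_t rss_types =
	RTE_ETH_RSS_NONFRAG_IPV4_TCP |
	RTE_ETH_RSS_L3_SRC_ONLY |
	RTE_ETH_RSS_L4_SRC_ONLY;

Per the validation above, a SRC_ONLY/DST_ONLY bit is only accepted
alongside a matching L3/L4 type, and setting both SRC_ONLY and DST_ONLY
of the same level behaves as if neither were set.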
 
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index e2d8b2b5f7f1..ccb3924a5f68 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1207,24 +1207,24 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 	event.event_data.link_event.link_status =
 		dev->data->dev_link.link_status;
 
-	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+	/* need to convert the RTE_ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
 	switch (dev->data->dev_link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	default:
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 554b1142c136..a13bb81115f4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1329,7 +1329,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	for (i = 0; i < tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		if (k) {
 			for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
 				for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
@@ -1995,7 +1995,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2243,7 +2243,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 	}
 	/* check simple tx conflict */
 	if (ad->tx_simple_allowed) {
-		if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+		if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
 				txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
 			PMD_DRV_LOG(ERR, "No-simple tx is required.");
 			return -EINVAL;
@@ -3417,7 +3417,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		 txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
 	ad->tx_vec_allowed = (ad->tx_simple_allowed &&
 			txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2301e6301d7d..5e6eecc50116 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -120,7 +120,7 @@ struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
-	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -166,7 +166,7 @@ struct i40e_tx_queue {
 	bool q_set; /**< indicate if tx queue has been configured */
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
-	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 4ffe030fcb64..7abc0821d119 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -900,7 +900,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index f52e3c567558..f9a7f4655050 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -100,7 +100,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	  */
 	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < n; i++) {
 			free[i] = txep[i].mbuf;
 			txep[i].mbuf = NULL;
@@ -211,7 +211,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct i40e_rx_queue *rxq;
 	uint16_t desc, i;
 	bool first_queue;
@@ -221,11 +221,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	 /* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	/* no QinQ support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 
 	/**
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 12d5a2e48a9b..663c46b91dc5 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -42,30 +42,30 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
 		sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -385,19 +385,19 @@ i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
 		return -EINVAL;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			return i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_STRIP)
+		    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			return i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_stripping(vsi, FALSE);
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 34bfa9af4734..12f541f53926 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -50,18 +50,18 @@
 	VIRTCHNL_VF_OFFLOAD_RX_POLLING)
 
 #define IAVF_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |         \
-	ETH_RSS_NONFRAG_IPV4_TCP |  \
-	ETH_RSS_NONFRAG_IPV4_UDP |  \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |         \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 611f1f7722b0..df44df772e4e 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -266,53 +266,53 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	static const uint64_t map_hena_rss[] = {
 		/* IPv4 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
-				ETH_RSS_NONFRAG_IPV4_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
-				ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
 
 		/* IPv6 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
-				ETH_RSS_NONFRAG_IPV6_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
-				ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
 
 		/* L2 Payload */
-		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
 	};
 
-	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
-				  ETH_RSS_NONFRAG_IPV4_TCP |
-				  ETH_RSS_NONFRAG_IPV4_SCTP |
-				  ETH_RSS_NONFRAG_IPV4_OTHER |
-				  ETH_RSS_FRAG_IPV4;
+	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV4;
 
-	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_NONFRAG_IPV6_SCTP |
-				  ETH_RSS_NONFRAG_IPV6_OTHER |
-				  ETH_RSS_FRAG_IPV6;
+	const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV6;
 
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
@@ -331,13 +331,13 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	/**
-	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered two
 	 * generalizations of all other IPv4 and IPv6 RSS types.
 	 */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		rss_hf |= ipv4_rss;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		rss_hf |= ipv6_rss;
 
 	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
@@ -363,10 +363,10 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	if (valid_rss_hf & ipv4_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
 
 	if (valid_rss_hf & ipv6_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
 
 	if (rss_hf & ~valid_rss_hf)
 		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
@@ -467,7 +467,7 @@ iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
 		return 0;
 
 	enable = !!(dev->data->dev_conf.txmode.offloads &
-		    DEV_TX_OFFLOAD_VLAN_INSERT);
+		    RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	iavf_config_vlan_insert_v2(adapter, enable);
 
 	return 0;
@@ -479,10 +479,10 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
 	int err;
 
 	err = iavf_dev_vlan_offload_set(dev,
-					ETH_VLAN_STRIP_MASK |
-					ETH_QINQ_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK |
-					ETH_VLAN_EXTEND_MASK);
+					RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_QINQ_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK |
+					RTE_ETH_VLAN_EXTEND_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
 		return err;
@@ -512,8 +512,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_vec_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Large VF setting */
 	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
@@ -611,7 +611,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -961,34 +961,34 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
@@ -1048,42 +1048,42 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (vf->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -1231,14 +1231,14 @@ iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
 	bool enable;
 	int err;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 
 		iavf_iterate_vlan_filters_v2(dev, enable);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		err = iavf_config_vlan_strip_v2(adapter, enable);
 		/* If not support, the stripping is already disabled by PF */
@@ -1267,9 +1267,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			err = iavf_enable_vlan_strip(adapter);
 		else
 			err = iavf_disable_vlan_strip(adapter);
@@ -1311,8 +1311,8 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(lut, vf->rss_lut, reta_size);
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -1348,8 +1348,8 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = vf->rss_lut[i];
 	}
@@ -1556,7 +1556,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
 		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
-					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
 					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
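
The reta update/query hunks above only rename RTE_RETA_GROUP_SIZE to
RTE_ETH_RETA_GROUP_SIZE (still 64 entries per group). For reference,
the application-side counterpart of the same idx/shift arithmetic, as a
sketch assuming a 512-entry table, nb_q Rx queues, port_id/ret in
scope, and rte_ethdev.h included:

/* Sketch: spread a 512-entry RSS redirection table over nb_q queues. */
struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE] = { 0 };
uint16_t i, idx, shift;

for (i = 0; i < 512; i++) {
	idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* which 64-entry group */
	shift = i % RTE_ETH_RETA_GROUP_SIZE; /* slot within that group */
	reta_conf[idx].mask |= 1ULL << shift;
	reta_conf[idx].reta[shift] = i % nb_q;
}
ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
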
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 1f2d3772d105..248054f79efd 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -341,90 +341,90 @@ struct virtchnl_proto_hdrs ipv4_ecpri_tmplt = {
 /* rss type super set */
 
 /* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define IAVF_RSS_TYPE_OUTER_IPV4	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6	(ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* VLAN IPV4 */
 #define IAVF_RSS_TYPE_VLAN_IPV4		(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define IAVF_RSS_TYPE_VLAN_IPV6		(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4	ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4	RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6	ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6	RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* GTPU IPv4 */
 #define IAVF_RSS_TYPE_GTPU_IPV4		(IAVF_RSS_TYPE_INNER_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_UDP	(IAVF_RSS_TYPE_INNER_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_TCP	(IAVF_RSS_TYPE_INNER_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define IAVF_RSS_TYPE_GTPU_IPV6		(IAVF_RSS_TYPE_INNER_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_UDP	(IAVF_RSS_TYPE_INNER_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_TCP	(IAVF_RSS_TYPE_INNER_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /**
  * Supported pattern for hash.
@@ -442,7 +442,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv4_udp,		IAVF_RSS_TYPE_VLAN_IPV4_UDP,	&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_tcp,		IAVF_RSS_TYPE_VLAN_IPV4_TCP,	&outer_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_sctp,		IAVF_RSS_TYPE_VLAN_IPV4_SCTP,	&outer_ipv4_sctp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpu,			ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gtpu,			RTE_ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&inner_ipv4_tcp_tmplt},
@@ -484,9 +484,9 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv4_ah,			IAVF_RSS_TYPE_IPV4_AH,		&ipv4_ah_tmplt},
 	{iavf_pattern_eth_ipv4_l2tpv3,			IAVF_RSS_TYPE_IPV4_L2TPV3,	&ipv4_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv4_pfcp,			IAVF_RSS_TYPE_IPV4_PFCP,	&ipv4_pfcp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpc,			ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
-	{iavf_pattern_eth_ecpri,			ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
-	{iavf_pattern_eth_ipv4_ecpri,			ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_gtpc,			RTE_ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ecpri,			RTE_ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_ecpri,			RTE_ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -504,7 +504,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv6_udp,		IAVF_RSS_TYPE_VLAN_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_tcp,		IAVF_RSS_TYPE_VLAN_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_sctp,		IAVF_RSS_TYPE_VLAN_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpu,			ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gtpu,			RTE_ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&inner_ipv6_tcp_tmplt},
@@ -546,7 +546,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_ah,			IAVF_RSS_TYPE_IPV6_AH,		&ipv6_ah_tmplt},
 	{iavf_pattern_eth_ipv6_l2tpv3,			IAVF_RSS_TYPE_IPV6_L2TPV3,	&ipv6_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv6_pfcp,			IAVF_RSS_TYPE_IPV6_PFCP,	&ipv6_pfcp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpc,			ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ipv6_gtpc,			RTE_ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -580,52 +580,52 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 	struct virtchnl_rss_cfg rss_cfg;
 
 #define IAVF_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		rss_cfg.proto_hdrs = inner_ipv4_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		rss_cfg.proto_hdrs = inner_ipv6_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
@@ -779,28 +779,28 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 		hdr = &proto_hdrs->proto_hdr[i];
 		switch (hdr->type) {
 		case VIRTCHNL_PROTO_HDR_ETH:
-			if (!(rss_type & ETH_RSS_ETH))
+			if (!(rss_type & RTE_ETH_RSS_ETH))
 				hdr->field_selector = 0;
-			else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_DST);
-			else if (rss_type & ETH_RSS_L2_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_SRC);
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4) {
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 					iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
-				} else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				}
@@ -808,39 +808,39 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4)
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
 					REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6:
 			if (rss_type &
-			    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV6_TCP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			    (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				}
@@ -857,7 +857,7 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
 			else
 				hdr->field_selector = 0;
@@ -865,87 +865,87 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_UDP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, UDP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_TCP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, TCP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_SCTP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_SCTP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, SCTP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_S_VLAN:
-			if (!(rss_type & ETH_RSS_S_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_S_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_C_VLAN:
-			if (!(rss_type & ETH_RSS_C_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_C_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_L2TPV3:
-			if (!(rss_type & ETH_RSS_L2TPV3))
+			if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ESP:
-			if (!(rss_type & ETH_RSS_ESP))
+			if (!(rss_type & RTE_ETH_RSS_ESP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_AH:
-			if (!(rss_type & ETH_RSS_AH))
+			if (!(rss_type & RTE_ETH_RSS_AH))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_PFCP:
-			if (!(rss_type & ETH_RSS_PFCP))
+			if (!(rss_type & RTE_ETH_RSS_PFCP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ECPRI:
-			if (!(rss_type & ETH_RSS_ECPRI))
+			if (!(rss_type & RTE_ETH_RSS_ECPRI))
 				hdr->field_selector = 0;
 			break;
 		default:
@@ -962,7 +962,7 @@ iavf_refine_proto_hdrs_gtpu(struct virtchnl_proto_hdrs *proto_hdrs,
 	struct virtchnl_proto_hdr *hdr;
 	int i;
 
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	for (i = 0; i < proto_hdrs->count; i++) {
@@ -1059,10 +1059,10 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -1073,27 +1073,27 @@ struct rss_attr_type {
 	uint64_t type;
 };
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE64)
 
 #define INVALID_RSS_ATTR	(RTE_ETH_RSS_L3_PRE32	| \
@@ -1103,9 +1103,9 @@ struct rss_attr_type {
 				 RTE_ETH_RSS_L3_PRE96)
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* current ipv6 prefix only supports prefix 64 bits*/
 	{RTE_ETH_RSS_L3_PRE64,				VALID_RSS_IPV6},
 	{INVALID_RSS_ATTR,				0}
@@ -1122,15 +1122,15 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88bbd40c1027..ac4db117f5cd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -617,7 +617,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->vsi = vsi;
 	rxq->offloads = offloads;
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
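
The KEEP_CRC rename above is purely textual; the behavior stays: when
the offload is on, the 4-byte FCS remains in the received buffer and
must be accounted for explicitly. A sketch, assuming offloads and an
mbuf m are in scope:

/* Sketch: strip the FCS from length accounting when KEEP_CRC is set. */
uint16_t crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
		   RTE_ETHER_CRC_LEN : 0;
uint16_t data_len = rte_pktmbuf_pkt_len(m) - crc_len;
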
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f4ae2fd6e123..2d7f6b1b2dca 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -24,22 +24,22 @@
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_QINQ_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		DEV_RX_OFFLOAD_CHECKSUM |		 \
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_RX_OFFLOAD_VLAN |		 \
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_VLAN |		 \
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
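
These masks only change spelling; they still decide whether the vector
Rx/Tx paths may be used. Roughly how the driver consumes them, as a
paraphrased sketch (use_vector_tx is a stand-in, not driver state):

/* Sketch: fall back to scalar Tx when the queue enables an offload the
 * vector code cannot handle (multi-segment frames, TSO). */
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
	use_vector_tx = false;
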
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 72a4fcab04a5..b47c51b8ebe4 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -906,7 +906,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 		    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
@@ -958,7 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					(_mm256_castsi128_si256(raw_desc_bh0),
 					raw_desc_bh1, 1);
 
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 12375d3d80bd..b8f2f69f12fc 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1141,7 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1193,7 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 						(_mm256_castsi128_si256(raw_desc_bh0),
 						 raw_desc_bh1, 1);
 
-				if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+				if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 					/**
 					 * to shift the 32b RSS hash value to the
 					 * highest 32b of each 128b before mask
@@ -1721,7 +1721,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
 								rte_lcore_id());
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index edb54991e298..1de43b9b8ee2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -819,7 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index c9c01a14e349..7b7df5eebb6d 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -835,7 +835,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
 		/* set all lut items to default queue */
 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
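
ice_dcf checks mq_mode against the renamed RTE_ETH_MQ_RX_RSS. For
context, a minimal port configuration that takes the RSS branch (a
sketch; RTE_ETH_RSS_IP is just one reasonable rss_hf choice, and
port_id/nb_rxq/nb_txq/ret are assumed):

/* Sketch: enable RSS at configure time with the namespaced names. */
struct rte_eth_conf port_conf = {
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
	.rx_adv_conf = {
		.rss_conf = { .rss_hf = RTE_ETH_RSS_IP },
	},
};
ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
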
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b8a537cb8556..a90e40964ec5 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -95,7 +95,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -576,7 +576,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -637,7 +637,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 
 	return 0;
@@ -652,8 +652,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	return 0;
 }
@@ -675,27 +675,27 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -925,42 +925,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (hw->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = hw->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -979,11 +979,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
 					udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
 					udp_tunnel->udp_port);
 		break;
@@ -1010,8 +1010,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
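
The tunnel-type enum moves under RTE_ETH_TUNNEL_TYPE_*. From the
application side the renamed path is reached like this (sketch; 4789 is
the IANA-assigned VXLAN port, port_id/ret assumed):

/* Sketch: register the standard VXLAN UDP port via the renamed enum. */
struct rte_eth_udp_tunnel tunnel = {
	.udp_port = 4789,
	.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
};
ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
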
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 44fb38dbe7b1..b9fcfc80ad9b 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -37,7 +37,7 @@ ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -45,7 +45,7 @@ ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -143,28 +143,28 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -246,9 +246,9 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		bool enable = !!(dev_conf->rxmode.offloads &
-				 DEV_RX_OFFLOAD_VLAN_STRIP);
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (enable && repr->outer_vlan_info.port_vlan_ena) {
 			PMD_DRV_LOG(ERR,
@@ -345,7 +345,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 	if (!ice_dcf_vlan_offload_ena(repr))
 		return -ENOTSUP;
 
-	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer VLAN in QinQ\n");
 		return -EINVAL;
@@ -375,7 +375,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 
 	if (repr->outer_vlan_info.stripping_ena) {
 		err = ice_dcf_vf_repr_vlan_offload_set(dev,
-						       ETH_VLAN_STRIP_MASK);
+						       RTE_ETH_VLAN_STRIP_MASK);
 		if (err) {
 			PMD_DRV_LOG(ERR,
 				    "Failed to reset VLAN stripping : %d\n",
@@ -449,7 +449,7 @@ ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
 	int err;
 
 	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
-					       ETH_VLAN_STRIP_MASK);
+					       RTE_ETH_VLAN_STRIP_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
 		return err;
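
The representor toggles stripping through the *_MASK bits; applications
reach the same callback through rte_eth_dev_set_vlan_offload(). A
hedged sketch, assuming port_id/ret:

/* Sketch: turn on VLAN stripping without disturbing the other bits. */
int offload = rte_eth_dev_get_vlan_offload(port_id);

offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
ret = rte_eth_dev_set_vlan_offload(port_id, offload);
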
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index edbc74632711..6a6637a15af7 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1487,9 +1487,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	TAILQ_INIT(&vsi->mac_list);
 	TAILQ_INIT(&vsi->vlan_list);
 
-	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+	/* Keep in sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
 			hw->func_caps.common_cap.rss_table_size;
 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -2993,14 +2993,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	int ret;
 
 #define ICE_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
 	if (ret)
@@ -3010,7 +3010,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	cfg.symm = 0;
 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 	/* Configure RSS for IPv4 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3020,7 +3020,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for IPv6 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3030,7 +3030,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3041,7 +3041,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3052,7 +3052,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3063,7 +3063,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3074,7 +3074,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3085,7 +3085,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3095,7 +3095,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3105,7 +3105,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3115,7 +3115,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3125,7 +3125,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3135,7 +3135,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3145,7 +3145,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3288,8 +3288,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_rx_queues) {
 		ret = ice_init_rss(pf);
@@ -3569,8 +3569,8 @@ ice_dev_start(struct rte_eth_dev *dev)
 	ice_set_rx_function(dev);
 	ice_set_tx_function(dev);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = ice_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3682,40 +3682,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->flow_type_rss_offloads = 0;
 
 	if (!is_safe_mode) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM |
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_QINQ_STRIP |
-			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH |
-			DEV_RX_OFFLOAD_TIMESTAMP;
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH |
+			RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_QINQ_INSERT |
-			DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM |
-			DEV_TX_OFFLOAD_SCTP_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
 	dev_info->rx_queue_offload_capa = 0;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3754,24 +3754,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_align = ICE_ALIGN_RING_DESC,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			       ETH_LINK_SPEED_100M |
-			       ETH_LINK_SPEED_1G |
-			       ETH_LINK_SPEED_2_5G |
-			       ETH_LINK_SPEED_5G |
-			       ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_20G |
-			       ETH_LINK_SPEED_25G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			       RTE_ETH_LINK_SPEED_100M |
+			       RTE_ETH_LINK_SPEED_1G |
+			       RTE_ETH_LINK_SPEED_2_5G |
+			       RTE_ETH_LINK_SPEED_5G |
+			       RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_20G |
+			       RTE_ETH_LINK_SPEED_25G;
 
 	phy_type_low = hw->port_info->phy.phy_type_low;
 	phy_type_high = hw->port_info->phy.phy_type_high;
 
 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3836,8 +3836,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
 					      &link_status, NULL);
 		if (status != ICE_SUCCESS) {
-			link.link_speed = ETH_SPEED_NUM_100M;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_100M;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			goto out;
 		}
@@ -3853,55 +3853,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		goto out;
 
 	/* Full-duplex operation at all supported speeds */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case ICE_AQ_LINK_SPEED_10MB:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case ICE_AQ_LINK_SPEED_100MB:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case ICE_AQ_LINK_SPEED_1000MB:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case ICE_AQ_LINK_SPEED_2500MB:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_5GB:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_10GB:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case ICE_AQ_LINK_SPEED_20GB:
-		link.link_speed = ETH_SPEED_NUM_20G;
+		link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case ICE_AQ_LINK_SPEED_25GB:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case ICE_AQ_LINK_SPEED_40GB:
-		link.link_speed = ETH_SPEED_NUM_40G;
+		link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case ICE_AQ_LINK_SPEED_50GB:
-		link.link_speed = ETH_SPEED_NUM_50G;
+		link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case ICE_AQ_LINK_SPEED_100GB:
-		link.link_speed = ETH_SPEED_NUM_100G;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case ICE_AQ_LINK_SPEED_UNKNOWN:
 		PMD_DRV_LOG(ERR, "Unknown link speed");
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "None link speed");
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 
 out:
 	ice_atomic_write_link_status(dev, &link);
@@ -4377,15 +4377,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ice_vsi_config_vlan_filter(vsi, true);
 		else
 			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ice_vsi_config_vlan_stripping(vsi, true);
 		else
 			ice_vsi_config_vlan_stripping(vsi, false);
@@ -4500,8 +4500,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4550,8 +4550,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -5460,7 +5460,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
 		break;
 	default:
@@ -5484,7 +5484,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
@@ -5505,7 +5505,7 @@ ice_timesync_enable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_TIMESTAMP)) {
+	    RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
 		return -1;
 	}
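
For reference, with this rename an application has to request the Rx
timestamp offload under its new name before starting the port, otherwise
the check in ice_timesync_enable() above fails. A minimal sketch, assuming
a valid port_id and an otherwise complete rte_eth_conf:

#include <rte_ethdev.h>

/* Sketch: request Rx timestamping with the renamed flag before
 * rte_eth_dev_start(), so the offload check in ice_timesync_enable()
 * passes; port_id and the rest of the configuration are assumed.
 */
static int
request_rx_timestamp(uint16_t port_id, struct rte_eth_conf *conf)
{
	conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	return rte_eth_dev_configure(port_id, 1, 1, conf);
}
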
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1cd3753ccc5f..599e0028f7e8 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -117,19 +117,19 @@
 		       ICE_FLAG_VF_MAC_BY_PF)
 
 #define ICE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /**
  * The overhead from MTU to max frame size.
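
ICE_RSS_OFFLOAD_ALL above is what the driver reports in
flow_type_rss_offloads, so an application can clamp its requested hash
types to it. A minimal sketch, assuming a valid and configured port_id:

#include <rte_ethdev.h>

/* Sketch: keep only the hash types the PMD advertises before
 * programming them into the port.
 */
static int
clamp_rss_request(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rss_conf->rss_hf &= dev_info.flow_type_rss_offloads;
	return rte_eth_dev_rss_hash_update(port_id, rss_conf);
}
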
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 20a3204fab7e..35eff8b17d28 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -39,27 +39,27 @@
 #define ICE_IPV4_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
 #define ICE_IPV6_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE32	| \
 				 RTE_ETH_RSS_L3_PRE48	| \
 				 RTE_ETH_RSS_L3_PRE64)
@@ -373,87 +373,87 @@ struct ice_rss_hash_cfg eth_tmplt = {
 };
 
 /* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4		(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define ICE_RSS_TYPE_ETH_IPV4		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_UDP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_TCP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_SCTP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV4		ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV4		RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 /* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6		(ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(ETH_RSS_ETH | ETH_RSS_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_ETH_IPV6_UDP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_TCP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_SCTP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV6		ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV6		RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* VLAN IPV4 */
 #define ICE_RSS_TYPE_VLAN_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV4)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_VLAN_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_SCTP	(ICE_RSS_TYPE_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define ICE_RSS_TYPE_VLAN_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_FRAG	(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_VLAN_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_SCTP	(ICE_RSS_TYPE_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 
 /* GTPU IPv4 */
 #define ICE_RSS_TYPE_GTPU_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define ICE_RSS_TYPE_GTPU_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 
 /* PPPOE */
-#define ICE_RSS_TYPE_PPPOE		(ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
 
 /* PPPOE IPv4 */
 #define ICE_RSS_TYPE_PPPOE_IPV4		(ICE_RSS_TYPE_IPV4 | \
@@ -472,17 +472,17 @@ struct ice_rss_hash_cfg eth_tmplt = {
 					 ICE_RSS_TYPE_PPPOE)
 
 /* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /* MAC */
-#define ICE_RSS_TYPE_ETH		ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH		RTE_ETH_RSS_ETH
 
 /**
  * Supported pattern for hash.
@@ -647,86 +647,86 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
-		if (!(rss_type & ETH_RSS_ETH))
+		if (!(rss_type & RTE_ETH_RSS_ETH))
 			*hash_flds &= ~ICE_FLOW_HASH_ETH;
-		if (rss_type & ETH_RSS_L2_SRC_ONLY)
+		if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
-		else if (rss_type & ETH_RSS_L2_DST_ONLY)
+		else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
 		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
-		if (rss_type & ETH_RSS_ETH)
+		if (rss_type & RTE_ETH_RSS_ETH)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
-		if (rss_type & ETH_RSS_C_VLAN)
+		if (rss_type & RTE_ETH_RSS_C_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
-		else if (rss_type & ETH_RSS_S_VLAN)
+		else if (rss_type & RTE_ETH_RSS_S_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
-		if (!(rss_type & ETH_RSS_PPPOE))
+		if (!(rss_type & RTE_ETH_RSS_PPPOE))
 			*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 		if (rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		    ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV4) {
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 				*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
 				*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 			}
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		}
 
-		if (rss_type & ETH_RSS_IPV4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 		if (rss_type &
-		   (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+		   (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		}
 
 		if (rss_type & RTE_ETH_RSS_L3_PRE32) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
 			} else {
@@ -735,10 +735,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE48) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
 			} else {
@@ -747,10 +747,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE64) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
 			} else {
@@ -762,81 +762,81 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV6_UDP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV6_TCP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_SCTP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
-		if (!(rss_type & ETH_RSS_L2TPV3))
+		if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 			*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
-		if (!(rss_type & ETH_RSS_ESP))
+		if (!(rss_type & RTE_ETH_RSS_ESP))
 			*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
-		if (!(rss_type & ETH_RSS_AH))
+		if (!(rss_type & RTE_ETH_RSS_AH))
 			*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
-		if (!(rss_type & ETH_RSS_PFCP))
+		if (!(rss_type & RTE_ETH_RSS_PFCP))
 			*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
 	}
 }
@@ -870,7 +870,7 @@ ice_refine_hash_cfg_gtpu(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
@@ -892,10 +892,10 @@ static void ice_refine_hash_cfg(struct ice_rss_hash_cfg *hash_cfg,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -907,9 +907,9 @@ struct rss_attr_type {
 };
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* the current IPv6 prefix support covers at most 64 bits */
 	{RTE_ETH_RSS_L3_PRE32,				VALID_RSS_IPV6},
 	{RTE_ETH_RSS_L3_PRE48,				VALID_RSS_IPV6},
@@ -928,16 +928,16 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
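
The two tables renamed above feed ice_any_invalid_rss_type(). A simplified
sketch of how they are consumed, assuming struct rss_attr_type carries an
attr mask and a type mask; the real check also validates the hash
function, which is omitted here:

/* Simplified sketch of the consumers of invalid_rss_comb and
 * rss_attr_to_valid_type (not a verbatim copy of the driver code).
 */
static bool
any_invalid_rss_type(uint64_t rss_type)
{
	uint32_t i;

	/* reject combinations listed in invalid_rss_comb */
	for (i = 0; i < RTE_DIM(invalid_rss_comb); i++)
		if (__builtin_popcountll(rss_type & invalid_rss_comb[i]) > 1)
			return true;

	/* an attribute bit is only valid with a matching type bit */
	for (i = 0; i < RTE_DIM(rss_attr_to_valid_type); i++) {
		struct rss_attr_type *rat = &rss_attr_to_valid_type[i];

		if ((rat->attr & rss_type) && !(rat->type & rss_type))
			return true;
	}

	return false;
}
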
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9f5..8406240d7209 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -303,7 +303,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 		}
 	}
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		/* Register mbuf field and flag for Rx timestamp */
 		err = rte_mbuf_dyn_rx_timestamp_register(
 				&ice_timestamp_dynfield_offset,
@@ -367,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -1117,7 +1117,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1624,7 +1624,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-			if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 				ts_ns = ice_tstamp_convert_32b_64b(hw,
 					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
 				if (ice_timestamp_dynflag > 0) {
@@ -1942,7 +1942,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2373,7 +2373,7 @@ ice_recv_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2889,7 +2889,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
 	for (i = 0; i < txq->tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
 			txep->mbuf = NULL;
@@ -3365,7 +3365,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		(txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
 
 	if (ad->tx_simple_allowed)
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 490693bff218..86955539bea8 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -474,7 +474,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			 * will cause a performance drop when entering this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 7efe7b50a206..af23f6a34e58 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -585,7 +585,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			 * will cause a performance drop when entering this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -995,7 +995,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index f0f99265857e..b1d975b31a5a 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -248,23 +248,23 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 }
 
 #define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define ICE_RX_VECTOR_OFFLOAD (				\
-		DEV_RX_OFFLOAD_CHECKSUM |		\
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_RX_OFFLOAD_VLAN |			\
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_RX_OFFLOAD_VLAN |			\
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define ICE_VECTOR_PATH		0
 #define ICE_VECTOR_OFFLOAD_PATH	1
@@ -287,7 +287,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		return -1;
 
 	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
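
The renamed masks above gate the Rx burst path selection. A condensed
sketch of that logic, simplified from ice_rx_vec_queue_default():

/* Condensed sketch of the path choice driven by the offload masks
 * (assumption: simplified from the real queue-default checks).
 */
static int
rx_vec_path(const struct ice_rx_queue *rxq)
{
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		return -1;			/* no vector path at all */

	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;	/* vector path with offloads */

	return ICE_VECTOR_PATH;			/* basic vector path */
}
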
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 653bd28b417c..117494131f32 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -479,7 +479,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * will cause a performance drop when entering this context.
 		 */
 		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 2a1ed90b641b..7ce80a442b35 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -307,8 +307,8 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		rx_mq_mode != ETH_MQ_RX_RSS) {
+	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 		/* RSS together with VMDq is not supported */
 		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				rx_mq_mode);
@@ -318,7 +318,7 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 	/* To avoid breaking software that sets an invalid mode, only
 	 * display a warning if an invalid mode is used.
 	 */
-	if (tx_mq_mode != ETH_MQ_TX_NONE)
+	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
 		PMD_INIT_LOG(WARNING,
 			"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
 			tx_mq_mode);
@@ -334,8 +334,8 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	ret  = igc_check_mq_mode(dev);
 	if (ret != 0)
@@ -473,12 +473,12 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 		if (speed == SPEED_2500) {
 			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
@@ -490,9 +490,9 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		}
 	} else {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -525,7 +525,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
 				" Port %d: Link Up - speed %u Mbps - %s",
 				dev->data->port_id,
 				(unsigned int)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				"full-duplex" : "half-duplex");
 		else
 			PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -972,18 +972,18 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	/* VLAN Offload Settings */
 	eth_igc_vlan_offload_set(dev,
-		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+		RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
 		hw->mac.autoneg = 1;
 	} else {
 		int num_speeds = 0;
 
-		if (*speeds & ETH_LINK_SPEED_FIXED) {
+		if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
 			PMD_DRV_LOG(ERR,
 				    "Force speed mode currently not supported");
 			igc_dev_clear_queues(dev);
@@ -993,33 +993,33 @@ eth_igc_start(struct rte_eth_dev *dev)
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.autoneg = 1;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_2_5G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
 			num_speeds++;
 		}
@@ -1482,14 +1482,14 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
-	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_vmdq_pools = 0;
 
 	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1515,9 +1515,9 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2141,13 +2141,13 @@ eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2179,16 +2179,16 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->fc.requested_mode = igc_fc_none;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->fc.requested_mode = igc_fc_rx_pause;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->fc.requested_mode = igc_fc_tx_pause;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->fc.requested_mode = igc_fc_full;
 		break;
 	default:
@@ -2234,29 +2234,29 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* set redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta, reg;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to update the register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* check mask whether need to read the register value first */
@@ -2290,29 +2290,29 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* read redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to read register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* read register and get the queue index */
@@ -2369,23 +2369,23 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_hf = 0;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 
 	rss_conf->rss_hf |= rss_hf;
 	return 0;
@@ -2514,22 +2514,22 @@ eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igc_vlan_hw_strip_enable(dev);
 		else
 			igc_vlan_hw_strip_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igc_vlan_hw_filter_enable(dev);
 		else
 			igc_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			return igc_vlan_hw_extend_enable(dev);
 		else
 			return igc_vlan_hw_extend_disable(dev);
@@ -2547,7 +2547,7 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 	uint32_t reg_val;
 
 	/* only the outer TPID of double VLAN can be configured */
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg_val = IGC_READ_REG(hw, IGC_VET);
 		reg_val = (reg_val & (~IGC_VET_EXT)) |
 			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
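
On the application side the same RTE_ETH_RETA_GROUP_SIZE arithmetic
appears when building a RETA update. A minimal sketch, assuming reta_size
and nb_queues come from rte_eth_dev_info_get() and reta_size does not
exceed RTE_ETH_RSS_RETA_SIZE_128:

#include <rte_ethdev.h>

/* Sketch: spread reta_size slots round-robin over nb_queues using
 * the renamed group-size macro, then push the table to the port.
 */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			  RTE_ETH_RETA_GROUP_SIZE] = { 0 };
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
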
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index 5e6c2ff30157..f56cad79e939 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -66,37 +66,37 @@ extern "C" {
 #define IGC_TX_MAX_MTU_SEG	UINT8_MAX
 
 #define IGC_RX_OFFLOAD_ALL	(    \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_VLAN_FILTER | \
-	DEV_RX_OFFLOAD_VLAN_EXTEND | \
-	DEV_RX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_RX_OFFLOAD_UDP_CKSUM   | \
-	DEV_RX_OFFLOAD_TCP_CKSUM   | \
-	DEV_RX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_RX_OFFLOAD_KEEP_CRC    | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
-	DEV_TX_OFFLOAD_VLAN_INSERT | \
-	DEV_TX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_TX_OFFLOAD_UDP_CKSUM   | \
-	DEV_TX_OFFLOAD_TCP_CKSUM   | \
-	DEV_TX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_TX_OFFLOAD_TCP_TSO     | \
-	DEV_TX_OFFLOAD_UDP_TSO	   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO     | \
+	RTE_ETH_TX_OFFLOAD_UDP_TSO	   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define IGC_RSS_OFFLOAD_ALL	(    \
-	ETH_RSS_IPV4               | \
-	ETH_RSS_NONFRAG_IPV4_TCP   | \
-	ETH_RSS_NONFRAG_IPV4_UDP   | \
-	ETH_RSS_IPV6               | \
-	ETH_RSS_NONFRAG_IPV6_TCP   | \
-	ETH_RSS_NONFRAG_IPV6_UDP   | \
-	ETH_RSS_IPV6_EX            | \
-	ETH_RSS_IPV6_TCP_EX        | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4               | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP   | \
+	RTE_ETH_RSS_IPV6               | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP   | \
+	RTE_ETH_RSS_IPV6_EX            | \
+	RTE_ETH_RSS_IPV6_TCP_EX        | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IGC_MAX_ETQF_FILTERS		3	/* etqf(3) is used for 1588 */
 #define IGC_ETQF_FILTER_1588		3
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index 56132e8c6cd6..1d34ae2e1b15 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -127,7 +127,7 @@ struct igc_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /** Offload features */
@@ -209,7 +209,7 @@ struct igc_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 static inline uint64_t
@@ -847,23 +847,23 @@ igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
 }
@@ -1037,10 +1037,10 @@ igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		igc_rss_configure(dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/*
 		 * configure the RSS registers anyway,
 		 * then disable the RSS logic
@@ -1111,7 +1111,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+		rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				RTE_ETHER_CRC_LEN : 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -1177,7 +1177,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	if (dev->data->scattered_rx) {
@@ -1221,20 +1221,20 @@ igc_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= IGC_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= IGC_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_IPOFL;
 
 	if (offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rxcsum |= IGC_RXCSUM_TUOFL;
-		offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
 	} else {
 		rxcsum &= ~IGC_RXCSUM_TUOFL;
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
 		rxcsum |= IGC_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_CRCOFL;
@@ -1242,7 +1242,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1279,12 +1279,12 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
 		dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			dvmolr |= IGC_DVMOLR_STRVLAN;
 		else
 			dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-		if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			dvmolr &= ~IGC_DVMOLR_STRCRC;
 		else
 			dvmolr |= IGC_DVMOLR_STRCRC;
@@ -2253,10 +2253,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 	reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
 	if (on) {
 		reg_val |= IGC_DVMOLR_STRVLAN;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
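
The matching application-side configuration uses the renamed enum and
hash flags; igc_hw_rss_hash_set() above then translates rss_hf into MRQC
bits. A minimal sketch with an illustrative subset of hash types:

#include <rte_ethdev.h>

/* Sketch: request RSS distribution with the renamed mq_mode enum;
 * the hash-type subset chosen here is only an example.
 */
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* use the default key */
			.rss_hf = RTE_ETH_RSS_IPV4 |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		},
	},
};
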
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index f94a1fed0a38..c688c3735c06 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -280,37 +280,37 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(link));
 
 	if (adapter->idev.port_info->config.an_enable) {
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	}
 
 	if (!adapter->link_up ||
 	    !(lif->state & IONIC_LIF_F_UP)) {
 		/* Interface is down */
-		link.link_status = ETH_LINK_DOWN;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else {
 		/* Interface is up */
-		link.link_status = ETH_LINK_UP;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		switch (adapter->link_speed) {
 		case  10000:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case  25000:
-			link.link_speed = ETH_SPEED_NUM_25G;
+			link.link_speed = RTE_ETH_SPEED_NUM_25G;
 			break;
 		case  40000:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case  50000:
-			link.link_speed = ETH_SPEED_NUM_50G;
+			link.link_speed = RTE_ETH_SPEED_NUM_50G;
 			break;
 		case 100000:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -387,17 +387,17 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
 
 	dev_info->speed_capa =
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	/*
 	 * Per-queue capabilities
 	 * RTE does not support disabling a feature on a queue if it is
 	 * enabled globally on the device. Thus the driver does not advertise
-	 * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+	 * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
 	 * though the driver would be otherwise capable of disabling it on
 	 * a per-queue basis.
 	 */
@@ -411,24 +411,24 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	 */
 
 	dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
 		0;
 
 	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
 		0;
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -463,9 +463,9 @@ ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		fc_conf->autoneg = 0;
 
 		if (idev->port_info->config.pause_type)
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -487,14 +487,14 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
 		break;
-	case RTE_FC_RX_PAUSE:
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		return -ENOTSUP;
 	}
 
@@ -545,12 +545,12 @@ ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = tbl_sz / RTE_RETA_GROUP_SIZE;
+	num = tbl_sz / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if (reta_conf[i].mask & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -585,12 +585,12 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-			&lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
-			RTE_RETA_GROUP_SIZE);
+			&lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+			RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -618,17 +618,17 @@ ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			IONIC_RSS_HASH_KEY_SIZE);
 
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -660,17 +660,17 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (!lif->rss_ind_tbl)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 			rss_types |= IONIC_RSS_TYPE_IPV4;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
-		if (rss_conf->rss_hf & ETH_RSS_IPV6)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 			rss_types |= IONIC_RSS_TYPE_IPV6;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
 
 		ionic_lif_rss_config(lif, rss_types, key, NULL);
@@ -842,15 +842,15 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
 static inline uint32_t
 ionic_parse_link_speeds(uint16_t link_speeds)
 {
-	if (link_speeds & ETH_LINK_SPEED_100G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 		return 100000;
-	else if (link_speeds & ETH_LINK_SPEED_50G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 		return 50000;
-	else if (link_speeds & ETH_LINK_SPEED_40G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		return 40000;
-	else if (link_speeds & ETH_LINK_SPEED_25G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		return 25000;
-	else if (link_speeds & ETH_LINK_SPEED_10G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		return 10000;
 	else
 		return 0;
@@ -874,12 +874,12 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	IONIC_PRINT_CALL();
 
 	allowed_speeds =
-		ETH_LINK_SPEED_FIXED |
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_FIXED |
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	if (dev_conf->link_speeds & ~allowed_speeds) {
 		IONIC_PRINT(ERR, "Invalid link setting");
@@ -896,7 +896,7 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure link */
-	an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+	an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 	ionic_dev_cmd_port_autoneg(idev, an_enable);
 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
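
The RTE_ETH_FC_* renames above follow the same pattern on the application
side; a small sketch (hypothetical helper, read-modify-write so the
watermarks and pause_time reported by the port are preserved):

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: switch a port to full link-level flow control. */
static int
enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
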
diff --git a/drivers/net/ionic/ionic_ethdev.h b/drivers/net/ionic/ionic_ethdev.h
index 6cbcd0f825a3..652f28c97d57 100644
--- a/drivers/net/ionic/ionic_ethdev.h
+++ b/drivers/net/ionic/ionic_ethdev.h
@@ -8,12 +8,12 @@
 #include <rte_ethdev.h>
 
 #define IONIC_ETH_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
 	(eth_dev)->data->dev_private)
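
A sketch of how an application would request the hash types behind
IONIC_ETH_RSS_OFFLOAD_ALL with the renamed flags (hypothetical helper;
a single Tx queue is assumed for brevity):

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: enable RSS over IPv4/IPv6 TCP and UDP flows. */
static int
configure_rss(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
	conf.rx_adv_conf.rss_conf.rss_hf =
		RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP |
		RTE_ETH_RSS_NONFRAG_IPV4_UDP |
		RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
		RTE_ETH_RSS_NONFRAG_IPV6_UDP;

	return rte_eth_dev_configure(port_id, nb_rxq, 1, &conf);
}
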
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index a1f9ce2d81cb..5e8fdf3893ad 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -1688,12 +1688,12 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
 
 	/*
 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
-	 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
 	 */
-	rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
 		else
 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
@@ -1733,19 +1733,19 @@ ionic_lif_configure(struct ionic_lif *lif)
 	/*
 	 * NB: While it is true that RSS_HASH is always enabled on ionic,
 	 *     setting this flag unconditionally causes problems in DTS.
-	 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	 */
 
 	/* RX per-port */
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 		lif->features |= IONIC_ETH_HW_RX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_RX_CSUM;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		lif->features |= IONIC_ETH_HW_RX_SG;
 		lif->eth_dev->data->scattered_rx = 1;
 	} else {
@@ -1754,30 +1754,30 @@ ionic_lif_configure(struct ionic_lif *lif)
 	}
 
 	/* Covers VLAN_STRIP */
-	ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
 
 	/* TX per-port */
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		lif->features |= IONIC_ETH_HW_TX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_CSUM;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
 	else
 		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		lif->features |= IONIC_ETH_HW_TX_SG;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_SG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		lif->features |= IONIC_ETH_HW_TSO;
 		lif->features |= IONIC_ETH_HW_TSO_IPV6;
 		lif->features |= IONIC_ETH_HW_TSO_ECN;
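
The RTE_ETH_VLAN_*_MASK bits used above pair with the runtime VLAN offload
API; a sketch (hypothetical helper, using RTE_ETH_VLAN_STRIP_OFFLOAD, the
offload-bit counterpart of the mask macros):

#include <rte_ethdev.h>

/* Hypothetical helper: toggle Rx VLAN stripping on a running port. */
static int
toggle_vlan_strip(uint16_t port_id, int on)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;

	if (on)
		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	else
		mask &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;

	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
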
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 4d16a39c6b6d..e3df7c56debe 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -203,11 +203,11 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 		txq->flags |= IONIC_QCQ_F_DEFERRED;
 
 	/* Convert the offload flags into queue flags */
-	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
-	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
-	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
 
 	eth_dev->data->tx_queues[tx_queue_id] = txq;
@@ -743,11 +743,11 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/*
 	 * Note: the interface does not currently support
-	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
 	 * when the adapter will be able to keep the CRC and subtract
 	 * it from the length for all received packets:
 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
-	 *     DEV_RX_OFFLOAD_KEEP_CRC)
+	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 	 *   rxq->crc_len = ETHER_CRC_LEN;
 	 */
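
The crc_len arithmetic that comment describes looks like this with the new
flag (a minimal sketch, mirroring the pattern used by the igc driver
earlier in this patch):

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Hypothetical helper: FCS bytes left on the mbuf when KEEP_CRC is set. */
static inline uint16_t
rx_crc_len(uint64_t rx_offloads)
{
	return (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
			RTE_ETHER_CRC_LEN : 0;
}
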
 
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 063a9c6a6f7f..17088585757f 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -50,11 +50,11 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->speed_capa =
 		(hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
-		ETH_LINK_SPEED_10G :
+		RTE_ETH_LINK_SPEED_10G :
 		((hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
-		ETH_LINK_SPEED_25G :
-		ETH_LINK_SPEED_AUTONEG);
+		RTE_ETH_LINK_SPEED_25G :
+		RTE_ETH_LINK_SPEED_AUTONEG);
 
 	dev_info->max_rx_queues  = 1;
 	dev_info->max_tx_queues  = 1;
@@ -67,30 +67,30 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	};
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 
 	dev_info->dev_capa =
@@ -2399,10 +2399,10 @@ ipn3ke_update_link(struct rte_rawdev *rawdev,
 				(uint64_t *)&link_speed);
 	switch (link_speed) {
 	case IFPGA_RAWDEV_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case IFPGA_RAWDEV_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
 		IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
@@ -2460,9 +2460,9 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2518,9 +2518,9 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
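
On the application side, the renamed link fields read back as in this
sketch (hypothetical helper; rte_eth_link_get_nowait() returns without
waiting for the link to settle):

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: print link state using the RTE_ETH_LINK_* names. */
static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: up, %u Mbps, %s\n", port_id, link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
	else
		printf("port %u: down\n", port_id);
}
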
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 46c95425adfb..7fd2c539e002 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1857,7 +1857,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= IXGBE_DMATXCTL_GDV;
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (qinq) {
 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
@@ -1872,7 +1872,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    " by single VLAN");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (qinq) {
 			/* Only the high 16 bits are valid */
 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
@@ -1959,10 +1959,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -2083,7 +2083,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			ctrl |= IXGBE_VLNCTRL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -2100,7 +2100,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 				ctrl |= IXGBE_RXDCTL_VME;
 				on = TRUE;
 			} else {
@@ -2122,17 +2122,17 @@ ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct ixgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -2143,19 +2143,18 @@ ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		ixgbe_vlan_hw_strip_config(dev);
-	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2194,10 +2193,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -2221,18 +2220,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -2242,12 +2241,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if no mq mode is configured, use the default scheme */
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -2256,12 +2255,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -2276,13 +2275,13 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdq+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2291,15 +2290,15 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2308,39 +2307,39 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -2349,7 +2348,7 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB) {
 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
 				PMD_INIT_LOG(ERR,
@@ -2373,8 +2372,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = ixgbe_check_mq_mode(dev);
@@ -2619,15 +2618,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -2704,17 +2703,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |  RTE_ETH_LINK_SPEED_5G |
+			RTE_ETH_LINK_SPEED_10G;
 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-			allowed_speeds = ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+			allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 		break;
 	default:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 	}
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
@@ -2728,7 +2727,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
@@ -2746,17 +2745,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
 		}
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= IXGBE_LINK_SPEED_100_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= IXGBE_LINK_SPEED_10_FULL;
 	}
 
@@ -3832,7 +3831,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB)
 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
 	}
@@ -3842,9 +3841,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
@@ -3883,21 +3882,21 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-		dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 
 	if (hw->mac.type == ixgbe_mac_X540 ||
 	    hw->mac.type == ixgbe_mac_X540_vf ||
 	    hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550_vf) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	}
 	if (hw->mac.type == ixgbe_mac_X550) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 	}
 
 	/* Driver-preferred Rx/Tx parameters */
@@ -3966,9 +3965,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -4211,11 +4210,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	u32 esdp_reg;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -4237,8 +4236,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
 	if (diag != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -4274,37 +4273,37 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case IXGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 
 	case IXGBE_LINK_SPEED_10_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 
 	case IXGBE_LINK_SPEED_100_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case IXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -4521,7 +4520,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -4740,13 +4739,13 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -5044,8 +5043,8 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5092,8 +5091,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5255,22 +5254,22 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -5330,8 +5329,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	ixgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -5568,10 +5567,10 @@ ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports hw strip feature, others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
@@ -5702,12 +5701,12 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 		}
@@ -5721,15 +5720,15 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= IXGBE_VMOLR_AUPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= IXGBE_VMOLR_ROMPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= IXGBE_VMOLR_ROPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= IXGBE_VMOLR_BAM;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= IXGBE_VMOLR_MPE;
 
 	return new_val;
@@ -6724,15 +6723,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = IXGBE_INCVAL_1GB;
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -7143,16 +7142,16 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		return ETH_RSS_RETA_SIZE_512;
+		return RTE_ETH_RSS_RETA_SIZE_512;
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
-		return ETH_RSS_RETA_SIZE_64;
+		return RTE_ETH_RSS_RETA_SIZE_64;
 	case ixgbe_mac_X540_vf:
 	case ixgbe_mac_82599_vf:
 		return 0;
 	default:
-		return ETH_RSS_RETA_SIZE_128;
+		return RTE_ETH_RSS_RETA_SIZE_128;
 	}
 }
 
@@ -7162,10 +7161,10 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		if (reta_idx < ETH_RSS_RETA_SIZE_128)
+		if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
 			return IXGBE_RETA(reta_idx >> 2);
 		else
-			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+			return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
@@ -7221,7 +7220,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -7232,7 +7231,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled*/
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -7256,9 +7255,9 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled*/
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7271,7 +7270,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7524,7 +7523,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -7556,7 +7555,7 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -7653,12 +7652,12 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
 
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
@@ -7690,11 +7689,11 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
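
The tunnel-type rename just above is exercised from the application like
this (hypothetical helper; 4789 is the IANA VXLAN port, shown only as an
example value):

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: register a UDP port for VXLAN classification. */
static int
add_vxlan_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel;

	memset(&tunnel, 0, sizeof(tunnel));
	tunnel.udp_port = udp_port;	/* e.g. 4789 */
	tunnel.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
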
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 950fb2d2450c..876b670f2682 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -114,15 +114,15 @@
 #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE    0x0
 
 #define IXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf irq enable mask */
 #define IXGBE_VF_MAXMSIVECTOR           1
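
The RTE_ETH_RETA_GROUP_SIZE indexing rewritten in ixgbe_dev_rss_reta_update()
above has this shape in an application (a sketch assuming nb_queues > 0 and
a started port; reta_size comes from dev_info and is at most
RTE_ETH_RSS_RETA_SIZE_512 here):

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: distribute RETA entries round-robin over queues. */
static int
spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < info.reta_size; i++) {
		uint16_t grp = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[grp].mask |= UINT64_C(1) << pos;
		reta[grp].reta[pos] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
}
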
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 27a49bbce5e7..7894047829a8 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -90,9 +90,9 @@ static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 				 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
 			uint32_t fdircmd, uint32_t fdirhash,
@@ -163,20 +163,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
 {
 	*fdirctrl = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
 		break;
@@ -807,13 +807,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_128KB_HASH_MASK;
@@ -850,15 +850,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_128KB_HASH_MASK;
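
For completeness, the renamed pballoc knob lives in the port configuration;
a fragment (hypothetical, showing only the field this patch touches; mode
selection and the rest of rte_eth_conf are left at their defaults):

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical fragment: pick the smallest flow-director filter pool,
 * i.e. 8k - 1 signature filters per the 82599 code above. */
static void
init_fdir_pballoc(struct rte_eth_conf *conf)
{
	memset(conf, 0, sizeof(*conf));
	conf->fdir_conf.pballoc = RTE_ETH_FDIR_PBALLOC_64K;
}
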
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 27322ab9038a..bdc9d4796c02 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1259,7 +1259,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index e45c5501e6bf..944c9f23809e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -392,7 +392,7 @@ ixgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -400,7 +400,7 @@ ixgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -633,11 +633,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -657,7 +657,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
@@ -665,7 +665,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
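
Matching the sanity checks above, inline IPsec is requested per port with
the renamed SECURITY bits (a sketch assuming the device exposes a security
context and advertises both offloads):

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: enable inline IPsec Rx/Tx processing. */
static int
configure_inline_ipsec(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_SECURITY;
	conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_SECURITY;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
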
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 295e5a39b245..9f1bd0a62ba4 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -104,15 +104,15 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -263,15 +263,15 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
 		gpie |= IXGBE_GPIE_VTMODE_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
 		gpie |= IXGBE_GPIE_VTMODE_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
 		gpie |= IXGBE_GPIE_VTMODE_16;
 		break;
@@ -674,29 +674,29 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
 		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
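
The pool sizing rule in ixgbe_pf_host_init() reduces to a small pure
function; a sketch (the RTE_ETH_*_POOLS enumerators double as the pool
counts, which is what makes the comparisons above work):

#include <rte_ethdev.h>

/* Hypothetical helper: pool count for a given number of VFs, as above. */
static enum rte_eth_nb_pools
pools_for_vf_num(uint16_t vf_num)
{
	if (vf_num >= RTE_ETH_32_POOLS)
		return RTE_ETH_64_POOLS;
	if (vf_num >= RTE_ETH_16_POOLS)
		return RTE_ETH_32_POOLS;
	return RTE_ETH_16_POOLS;
}
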
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b263dfe1d574..9e5716f935a2 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2592,26 +2592,26 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2780,7 +2780,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/*
@@ -3021,7 +3021,7 @@ ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (hw->mac.type != ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return offloads;
 }
@@ -3032,19 +3032,19 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t offloads;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_SCATTER |
-		   DEV_RX_OFFLOAD_RSS_HASH;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (ixgbe_is_vf(dev) == 0)
-		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	/*
 	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -3054,20 +3054,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	     hw->mac.type == ixgbe_mac_X540 ||
 	     hw->mac.type == ixgbe_mac_X550) &&
 	    !RTE_ETH_DEV_SRIOV(dev).active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -3122,7 +3122,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -3507,23 +3507,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
@@ -3605,23 +3605,23 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -3697,12 +3697,12 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		ixgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * RXPBSIZE
@@ -3727,7 +3727,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -3736,7 +3736,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	}
 
 	/* MRQC: enable vmdq and dcb */
-	mrqc = (num_pools == ETH_16_POOLS) ?
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
 		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
@@ -3752,7 +3752,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3776,7 +3776,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 16 or 32 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*
 	 * MPSAR - allow pools to read specific mac addresses
@@ -3858,7 +3858,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	if (hw->mac.type != ixgbe_mac_82598EB)
 		/*PF VF Transmit Enable*/
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
-			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	ixgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3874,12 +3874,12 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3889,7 +3889,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3907,12 +3907,12 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3922,7 +3922,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3949,7 +3949,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3976,7 +3976,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -4145,7 +4145,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		if (hw->mac.type != ixgbe_mac_82598EB) {
 			config_dcb_rx = DCB_RX_CONFIG;
@@ -4158,8 +4158,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			ixgbe_vmdq_dcb_configure(dev);
 		}
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -4172,7 +4172,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -4183,7 +4183,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/*get DCB TX configuration parameters from rte_eth_conf*/
@@ -4199,15 +4199,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
-			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -4257,9 +4257,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 		}
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		}
 	}
 	if (config_dcb_tx) {
 		/* Only support an equally distributed
@@ -4273,7 +4272,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
 		}
@@ -4309,7 +4308,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
@@ -4323,7 +4322,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = ixgbe_dcb_pfc_enabled;
 		}
 		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -4344,12 +4343,12 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -4405,7 +4404,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 64 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
 
 	/*
@@ -4526,11 +4525,11 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
 		break;
 
@@ -4551,17 +4550,17 @@ ixgbe_config_vf_default(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQEN);
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT4TCEN);
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT8TCEN);
 		break;
@@ -4588,21 +4587,21 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			ixgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			ixgbe_rss_disable(dev);
@@ -4613,18 +4612,18 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4658,7 +4657,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			ixgbe_vmdq_tx_hw_configure(hw);
 		else {
 			mtqc = IXGBE_MTQC_64Q_1PB;
@@ -4671,13 +4670,13 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
 				IXGBE_MTQC_8TC_8TQ;
 			break;
@@ -4885,7 +4884,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		rxq->rx_using_sse = rx_using_sse;
 #ifdef RTE_LIB_SECURITY
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 #endif
 	}
 }
@@ -4913,10 +4912,10 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4924,8 +4923,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4939,7 +4938,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 	else
 		rfctl |= IXGBE_RFCTL_RSC_DIS;
@@ -4948,7 +4947,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -5070,7 +5069,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -5107,7 +5106,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5116,7 +5115,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -5158,11 +5157,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -5177,7 +5176,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -5187,7 +5186,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5393,9 +5392,9 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 #ifdef RTE_LIB_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SECURITY) ||
+			RTE_ETH_RX_OFFLOAD_SECURITY) ||
 		(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY)) {
+			RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = ixgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -5683,7 +5682,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5732,7 +5731,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
@@ -5740,8 +5739,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
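
The ixgbe Rx/Tx paths above gate behaviour on the namespaced offload flags
(RTE_ETH_RX_OFFLOAD_KEEP_CRC drives crc_len, for example). As a minimal
application-side sketch, assuming port 0 with a single queue pair and a
hypothetical helper name, an offload should be requested only when the PMD
reports the capability:

#include <rte_ethdev.h>

/* Hypothetical helper: request KEEP_CRC on port_id when supported. */
static int
configure_keep_crc(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = {0};
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	/* Only request the offload when the capability bit is set. */
	if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
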
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index a1764f2b08af..668a5b9814f6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -133,7 +133,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_udp_csum_zero_err;
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -227,7 +227,7 @@ struct ixgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 005e60668a8b..cd34d4098785 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -277,7 +277,7 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index ae03ea6e9db3..ac8976062fa7 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -119,14 +119,14 @@ ixgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -375,10 +375,10 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -392,7 +392,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
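
ixgbe_tc_nb_get() above encodes the fixed pool/TC trade-off: 32 pools leave
4 traffic classes, 16 pools leave 8. A sketch of selecting that mode from an
application, where the chosen pool count is only an assumption for
illustration:

#include <rte_ethdev.h>

/* Hypothetical helper: VMDq+DCB Tx with 16 pools, which the PMD
 * maps to RTE_ETH_8_TCS (32 pools would yield RTE_ETH_4_TCS). */
static void
set_vmdq_dcb_tx(struct rte_eth_conf *conf)
{
	conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
	conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools = RTE_ETH_16_POOLS;
}
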
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index 9fa75984fb31..bd528ff346c7 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -58,20 +58,20 @@ ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	/**< Maximum number of MAC addresses. */
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |	DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 	/**< Device RX offload capabilities. */
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	/**< Device TX offload capabilities. */
 
 	dev_info->speed_capa =
 		representor->pf_ethdev->data->dev_link.link_speed;
-	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	/**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 
 	dev_info->switch_info.name =
 		representor->pf_ethdev->device->name;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index cf089cd9aee5..9729f8575f53 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -303,10 +303,10 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
 	 */
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_16_POOLS;
+				  RTE_ETH_16_POOLS;
 	else
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_64_POOLS;
+				  RTE_ETH_64_POOLS;
 
 	for (q = 0; q < queues_per_pool; q++)
 		(*dev->dev_ops->vlan_strip_queue_set)(dev,
@@ -736,14 +736,14 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
 	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 	eth_conf = &dev->data->dev_conf;
 
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 90fc8160b1f8..eef6f6661c74 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -285,8 +285,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
 * @param rx_mask
 *    The RX mode mask, which is one or more of accepting Untagged Packets,
 *    packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-*    ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-*    ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+*    RTE_ETH_VMDQ_ACCEPT_UNTAG, RTE_ETH_VMDQ_ACCEPT_HASH_UC,
+*    RTE_ETH_VMDQ_ACCEPT_BROADCAST and RTE_ETH_VMDQ_ACCEPT_MULTICAST will be used
 *    in rx_mode.
 * @param on
 *    1 - Enable a VF RX mode.
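
The rx_mask parameter documented above is a plain bitwise OR of the renamed
accept flags. A usage sketch, with the port and VF numbers as illustrative
assumptions:

#include <rte_pmd_ixgbe.h>

/* Hypothetical helper: let a VF accept broadcast and multicast
 * promiscuous traffic via the renamed RTE_ETH_VMDQ_ACCEPT_* flags. */
static int
allow_vf_bcast_mcast(uint16_t port_id, uint16_t vf)
{
	uint16_t rx_mask = RTE_ETH_VMDQ_ACCEPT_BROADCAST |
			   RTE_ETH_VMDQ_ACCEPT_MULTICAST;

	return rte_pmd_ixgbe_set_vf_rxmode(port_id, vf, rx_mask, 1);
}
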
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index cb9f7c8e8200..c428caf44189 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,10 @@ struct pmd_internals {
 };
 
 static const struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 static int is_kni_initialized;
 
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 0fc3f0ab66a9..90ffe31b9fda 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -384,15 +384,15 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		break;
 	/* CN23xx 25G cards */
 	case PCI_SUBSYS_DEV_ID_CN2350_225:
 	case PCI_SUBSYS_DEV_ID_CN2360_225:
-		devinfo->speed_capa = ETH_LINK_SPEED_25G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		break;
 	default:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		lio_dev_err(lio_dev,
 			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
 		return -EINVAL;
@@ -406,27 +406,27 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	devinfo->max_mac_addrs = 1;
 
-	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_RX_OFFLOAD_UDP_CKSUM		|
-				    DEV_RX_OFFLOAD_TCP_CKSUM		|
-				    DEV_RX_OFFLOAD_VLAN_STRIP		|
-				    DEV_RX_OFFLOAD_RSS_HASH);
-	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_TX_OFFLOAD_UDP_CKSUM		|
-				    DEV_TX_OFFLOAD_TCP_CKSUM		|
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+	devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH);
+	devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
 
 	devinfo->rx_desc_lim = lio_rx_desc_lim;
 	devinfo->tx_desc_lim = lio_tx_desc_lim;
 
 	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
 	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
-	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
-					   ETH_RSS_NONFRAG_IPV4_TCP	|
-					   ETH_RSS_IPV6			|
-					   ETH_RSS_NONFRAG_IPV6_TCP	|
-					   ETH_RSS_IPV6_EX		|
-					   ETH_RSS_IPV6_TCP_EX);
+	devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4			|
+					   RTE_ETH_RSS_NONFRAG_IPV4_TCP	|
+					   RTE_ETH_RSS_IPV6			|
+					   RTE_ETH_RSS_NONFRAG_IPV6_TCP	|
+					   RTE_ETH_RSS_IPV6_EX		|
+					   RTE_ETH_RSS_IPV6_TCP_EX);
 	return 0;
 }
 
@@ -519,10 +519,10 @@ lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
 	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
 
-	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				rss_state->itable[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -562,12 +562,12 @@ lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
-		       RTE_RETA_GROUP_SIZE);
+		       &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE],
+		       RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -595,17 +595,17 @@ lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
 
 	if (rss_state->ip)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (rss_state->tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (rss_state->ipv6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (rss_state->ipv6_tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (rss_state->ipv6_ex)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (rss_state->ipv6_tcp_ex_hash)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -673,42 +673,42 @@ lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (rss_state->hash_disable)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 			hashinfo |= LIO_RSS_HASH_IPV4;
 			rss_state->ip = 1;
 		} else {
 			rss_state->ip = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
 			rss_state->tcp_hash = 1;
 		} else {
 			rss_state->tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {
 			hashinfo |= LIO_RSS_HASH_IPV6;
 			rss_state->ipv6 = 1;
 		} else {
 			rss_state->ipv6 = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
 			rss_state->ipv6_tcp_hash = 1;
 		} else {
 			rss_state->ipv6_tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {
 			hashinfo |= LIO_RSS_HASH_IPV6_EX;
 			rss_state->ipv6_ex = 1;
 		} else {
 			rss_state->ipv6_ex = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
 			rss_state->ipv6_tcp_ex_hash = 1;
 		} else {
@@ -757,7 +757,7 @@ lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -814,7 +814,7 @@ lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -912,10 +912,10 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	/* Initialize */
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	/* Return what we found */
 	if (lio_dev->linfo.link.s.link_up == 0) {
@@ -923,18 +923,18 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		return rte_eth_linkstatus_set(eth_dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP; /* Interface is up */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP; /* Interface is up */
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	switch (lio_dev->linfo.link.s.speed) {
 	case LIO_LINK_SPEED_10000:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case LIO_LINK_SPEED_25000:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	}
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -1086,8 +1086,8 @@ lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
 
 		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
 				  i % eth_dev->data->nb_rx_queues : 0);
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[conf_idx].reta[reta_idx] = q_idx;
 		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
 	}
@@ -1103,10 +1103,10 @@ lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rss_conf rss_conf;
 
 	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		lio_dev_rss_configure(eth_dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 	/* if mq_mode is none, disable rss mode. */
 	default:
 		memset(&rss_conf, 0, sizeof(rss_conf));
@@ -1484,7 +1484,7 @@ lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 1;
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -1505,11 +1505,11 @@ lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 0;
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
 		lio_dev->linfo.link.s.link_up = 1;
-		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		lio_dev_err(lio_dev, "Unable to set Link Down\n");
 		return -1;
 	}
@@ -1721,9 +1721,9 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Inform firmware about change in number of queues to use.
 	 * Disable IO queues and reset registers for re-configuration.
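
lio_dev_rss_configure() above walks the redirection table in
RTE_ETH_RETA_GROUP_SIZE chunks. The same indexing in isolation, as a sketch
assuming reta_size is a multiple of the group size:

#include <rte_ethdev.h>

/* Spread Rx queues round-robin across the RSS redirection table. */
static void
fill_reta(struct rte_eth_rss_reta_entry64 *reta_conf,
	  uint16_t reta_size, uint16_t nb_rx_queues)
{
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		uint16_t conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[conf_idx].reta[reta_idx] = i % nb_rx_queues;
		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
	}
}
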
diff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c
index 364e818d65c1..8533e39f6957 100644
--- a/drivers/net/memif/memif_socket.c
+++ b/drivers/net/memif/memif_socket.c
@@ -525,7 +525,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
 
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 980150293e86..9deb7a5f1360 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -55,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION		"memif_mp_send_region"
@@ -199,7 +199,7 @@ memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *de
 	dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1219,7 +1219,7 @@ memif_connect(struct rte_eth_dev *dev)
 
 		pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 		pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	}
 	MIF_LOG(INFO, "Connected.");
 	return 0;
@@ -1381,10 +1381,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		proc_private = dev->process_private;
-		if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+		if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
 				proc_private->regions_num == 0) {
 			memif_mp_request_regions(dev);
-		} else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+		} else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
 				proc_private->regions_num > 0) {
 			memif_free_regions(dev);
 		}
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 783ff94dce8d..d606ec8ca76d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -657,11 +657,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->if_index = priv->if_index;
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
 	info->speed_capa =
-			ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_20G |
-			ETH_LINK_SPEED_40G |
-			ETH_LINK_SPEED_56G;
+			RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_20G |
+			RTE_ETH_LINK_SPEED_40G |
+			RTE_ETH_LINK_SPEED_56G;
 	info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
 
 	return 0;
@@ -821,13 +821,13 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		dev_link.link_speed = link_speed;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	dev->data->dev_link = dev_link;
 	return 0;
 }
@@ -863,13 +863,13 @@ mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	ret = 0;
 out:
 	MLX4_ASSERT(ret >= 0);
@@ -899,13 +899,13 @@ mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
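
The mlx4 flow-control paths above translate between ethtool pause bits and
the renamed RTE_ETH_FC_* modes. The application-side counterpart, as a
sketch with the port id and helper name as assumptions:

#include <rte_ethdev.h>

/* Hypothetical helper: turn on pause frames in both directions. */
static int
enable_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc = {0};
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
	if (ret != 0)
		return ret;

	fc.mode = RTE_ETH_FC_FULL;	/* was RTE_FC_FULL */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}
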
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 71ea91b3fb82..2e1b6c87e983 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -109,21 +109,21 @@ mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
 	};
 	static const uint64_t dpdk[] = {
 		[INNER] = 0,
-		[IPV4] = ETH_RSS_IPV4,
-		[IPV4_1] = ETH_RSS_FRAG_IPV4,
-		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IPV6] = ETH_RSS_IPV6,
-		[IPV6_1] = ETH_RSS_FRAG_IPV6,
-		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IPV6_3] = ETH_RSS_IPV6_EX,
+		[IPV4] = RTE_ETH_RSS_IPV4,
+		[IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
+		[IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IPV6] = RTE_ETH_RSS_IPV6,
+		[IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
+		[IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IPV6_3] = RTE_ETH_RSS_IPV6_EX,
 		[TCP] = 0,
 		[UDP] = 0,
-		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
-		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
-		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
-		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
-		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
-		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+		[IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+		[IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+		[IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+		[IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
+		[IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+		[IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
 	};
 	static const uint64_t verbs[RTE_DIM(dpdk)] = {
 		[INNER] = IBV_RX_HASH_INNER,
@@ -1283,7 +1283,7 @@ mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1358,7 +1358,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 	struct rte_ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
 		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
+		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 		!ETH_DEV(priv)->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index d56009c41845..2aab0f60a7b5 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -118,7 +118,7 @@ mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
 static void
 mlx4_link_status_alarm(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	MLX4_ASSERT(priv->intr_alarm == 1);
@@ -183,7 +183,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
 	};
 	uint32_t caught[RTE_DIM(type)] = { 0 };
 	struct ibv_async_event event;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	unsigned int i;
 
@@ -280,7 +280,7 @@ mlx4_intr_uninstall(struct mlx4_priv *priv)
 int
 mlx4_intr_install(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	int rc;
 
@@ -386,7 +386,7 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx4_rxq_intr_enable(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index ee2d2b75e59a..781ee256df71 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -682,12 +682,12 @@ mlx4_rxq_detach(struct rxq *rxq)
 uint64_t
 mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
-			    DEV_RX_OFFLOAD_KEEP_CRC |
-			    DEV_RX_OFFLOAD_RSS_HASH;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+			    RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (priv->hw_csum)
-		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return offloads;
 }
 
@@ -703,7 +703,7 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 uint64_t
 mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	(void)priv;
 	return offloads;
@@ -785,7 +785,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
 	crc_present = 0;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (priv->hw_fcs_strip) {
 			crc_present = 1;
 		} else {
@@ -816,9 +816,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -832,7 +832,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 	if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
 		uint32_t sges_n;
 
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 7d8c4f2a2223..0db2e55befd3 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -273,20 +273,20 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 uint64_t
 mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+	uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (priv->hw_csum) {
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	}
 	if (priv->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (priv->hw_csum_l2tun) {
-		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (priv->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	}
 	return offloads;
 }
@@ -394,12 +394,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					   DEV_TX_OFFLOAD_UDP_CKSUM |
-					   DEV_TX_OFFLOAD_TCP_CKSUM)),
+			(offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
 			      (offloads &
-			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+			       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index f34133e2c641..79e27fe2d668 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -439,24 +439,24 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	else
 		dev_link.link_speed = link_speed;
 	priv->link_speed_capa = 0;
 	if (edata.supported & (SUPPORTED_1000baseT_Full |
 			       SUPPORTED_1000baseKX_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (edata.supported & SUPPORTED_10000baseKR_Full)
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
 			       SUPPORTED_40000baseCR4_Full |
 			       SUPPORTED_40000baseSR4_Full |
 			       SUPPORTED_40000baseLR4_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -545,45 +545,45 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		return ret;
 	}
 	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
-				ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+				RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
 	sc = ecmd->link_mode_masks[0] |
 		((uint64_t)ecmd->link_mode_masks[1] << 32);
 	priv->link_speed_capa = 0;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	sc = ecmd->link_mode_masks[2] |
 		((uint64_t)ecmd->link_mode_masks[3] << 32);
@@ -591,11 +591,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		  MLX5_BITSHIFT
 		       (ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -677,13 +677,13 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -709,14 +709,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
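
A note on the masked comparisons above: in enum rte_eth_fc_mode the values are
RTE_ETH_FC_NONE = 0, RTE_ETH_FC_RX_PAUSE = 1, RTE_ETH_FC_TX_PAUSE = 2 and
RTE_ETH_FC_FULL = 3, so RTE_ETH_FC_FULL == (RTE_ETH_FC_RX_PAUSE |
RTE_ETH_FC_TX_PAUSE). A condensed sketch of the same decode:

	int rx_pause = (fc_conf->mode & RTE_ETH_FC_RX_PAUSE) != 0;
	int tx_pause = (fc_conf->mode & RTE_ETH_FC_TX_PAUSE) != 0;
	/* (mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL above is
	 * therefore equivalent to rx_pause && tx_pause. */
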
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a823d26bebf9..d207ec053e07 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1350,8 +1350,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
@@ -1634,7 +1634,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/*
 	 * If HW has a bug handling tunnel packet decapsulation and
 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
 	 */
 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
 		config->hw_fcs_strip = 0;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index e28cc461b914..7727dfb4196c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1488,10 +1488,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
 			 struct rte_eth_udp_tunnel *udp_tunnel)
 {
 	MLX5_ASSERT(udp_tunnel != NULL);
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
 	    udp_tunnel->udp_port == 4789)
 		return 0;
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
 	    udp_tunnel->udp_port == 4790)
 		return 0;
 	return -ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a15f86616d49..ea17a86f4955 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1217,7 +1217,7 @@ TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
 struct mlx5_flow_rss_desc {
 	uint32_t level;
 	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index fe86bb40d351..12ddf4c7ff28 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -90,11 +90,11 @@
 #define MLX5_VPMD_DESCS_PER_LOOP      4
 
 /* Mask of RSS on source only or destination only. */
-#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
-			       ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+			       RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
 			    MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
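
The reworked MLX5_RSS_HF_MASK keeps the validation semantics: any rss_hf bit
outside RTE_ETH_RSS_IP/UDP/TCP plus the SRC/DST-only modifiers is rejected.
An illustrative sketch of the check as done in the hash-update path (the
driver reports the failure via rte_errno):

	if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
		rte_errno = EINVAL;	/* unsupported hash type requested */
		return -rte_errno;
	}
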
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 82e2284d9866..f2b78c3cc69e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -91,7 +91,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	if ((dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
+			RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
 			rte_mbuf_dyn_tx_timestamp_register(NULL, NULL) != 0) {
 		DRV_LOG(ERR, "port %u cannot register Tx timestamp field/flag",
 			dev->data->port_id);
@@ -225,8 +225,8 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->default_txportconf.ring_size = 256;
 	info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
 	info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
-	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
-		(priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
+	if ((priv->link_speed_capa & RTE_ETH_LINK_SPEED_200G) |
+		(priv->link_speed_capa & RTE_ETH_LINK_SPEED_100G)) {
 		info->default_rxportconf.nb_queues = 16;
 		info->default_txportconf.nb_queues = 16;
 		if (dev->data->nb_rx_queues > 2 ||
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index b4d0b7b5ef32..4309852523b2 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
 	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
 	 */
 	uint64_t node_flags;
 	/**<
@@ -298,7 +298,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -560,8 +560,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -569,11 +569,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -584,8 +584,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_GRE,
 			 MLX5_EXPANSION_NVGRE),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -593,11 +593,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_VXLAN] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -659,32 +659,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
 						  MLX5_EXPANSION_IPV4_TCP),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_IPV4_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
 						  MLX5_EXPANSION_IPV6_TCP,
 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_IPV6_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
@@ -1095,7 +1095,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1648,14 +1648,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
-	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-	    !(rss->types & ETH_RSS_IP))
+	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & RTE_ETH_RSS_IP))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L3 partial RSS requested but L3 RSS"
 					  " type not specified");
-	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
@@ -6411,8 +6411,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 * mlx5_flow_hashfields_adjust() in advance.
 		 */
 		rss_desc->level = rss->level;
-		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	}
 	flow->dev_handles = 0;
 	if (rss && rss->types) {
@@ -7036,7 +7036,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	if (!priv->reta_idx_n || !priv->rxqs_n) {
 		return 0;
 	}
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
@@ -8704,7 +8704,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 				(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 			ctx->action_rss.types = 0;
 		for (i = 0; i != priv->reta_idx_n; ++i)
 			ctx->queue[i] = (*priv->reta_idx)[i];
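
For reference, the "RSS type 0 means RTE_ETH_RSS_IP" convention noted above is
applied to the rte_flow RSS action; a hedged application-side sketch (queue list
and the rest of the action wiring assumed elsewhere):

	uint16_t queues[2] = { 0, 1 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,	/* outermost header */
		.types = 0,	/* 0: PMD falls back to RTE_ETH_RSS_IP */
		.queue_num = 2,
		.queue = queues,
	};
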
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5c68d4f7d742..ff85c1c013a5 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -328,18 +328,18 @@ enum mlx5_feature_name {
 
 /* Valid layer type for IPV4 RSS. */
 #define MLX5_IPV4_LAYER_TYPES \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-	 ETH_RSS_NONFRAG_IPV4_OTHER)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
 
 /* IBV hash source bits  for IPV4. */
 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 
 /* Valid layer type for IPV6 RSS. */
 #define MLX5_IPV6_LAYER_TYPES \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
-	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX  | ETH_RSS_IPV6_TCP_EX | \
-	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
+	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 /* IBV hash source bits  for IPV6. */
 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index e31d4d846825..759fe57f19d6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10837,9 +10837,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
 			else
 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10847,9 +10847,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
 			else
 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10863,11 +10863,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		return;
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-		if (rss_types & ETH_RSS_UDP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_UDP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_UDP;
 			else
@@ -10875,11 +10875,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		}
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-		if (rss_types & ETH_RSS_TCP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_TCP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_TCP;
 			else
@@ -14418,9 +14418,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4:
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV4;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV4;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV4;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14429,9 +14429,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV6:
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV6;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV6;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV6;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14440,11 +14440,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_UDP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_UDP:
-		if (rss_types & ETH_RSS_UDP) {
+		if (rss_types & RTE_ETH_RSS_UDP) {
 			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
 			else
 				*hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14453,11 +14453,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_TCP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_TCP:
-		if (rss_types & ETH_RSS_TCP) {
+		if (rss_types & RTE_ETH_RSS_TCP) {
 			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
 			else
 				*hash_field |= MLX5_TCP_IBV_RX_HASH;
@@ -14605,8 +14605,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 	origin = &shared_rss->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+	/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+	origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 1627c3905fa4..8a455cbf22f4 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1816,7 +1816,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_TCP,
+					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
 					 (IBV_RX_HASH_SRC_PORT_TCP |
 					  IBV_RX_HASH_DST_PORT_TCP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1829,7 +1829,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_UDP,
+					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
 					 (IBV_RX_HASH_SRC_PORT_UDP |
 					  IBV_RX_HASH_DST_PORT_UDP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index c32129cdc2b8..a4f690039e24 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -68,7 +68,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 		if (!(*priv->rxqs)[i])
 			continue;
 		(*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
-			!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+			!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
 		++idx;
 	}
 	return 0;
@@ -170,8 +170,8 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	/* Fill each entry of the table even if its bit is not set. */
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 			(*priv->reta_idx)[i];
 	}
 	return 0;
@@ -209,8 +209,8 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (((reta_conf[idx].mask >> i) & 0x1) == 0)
 			continue;
 		MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
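
The update/query loops above use the renamed RTE_ETH_RETA_GROUP_SIZE (64):
redirection-table entries travel in struct rte_eth_rss_reta_entry64 groups,
each with its own validity mask. The canonical addressing, sketched:

	uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;	/* which group */
	uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;	/* slot in group */

	if (reta_conf[idx].mask & (1ULL << pos))	/* entry is valid */
		(*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
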
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d8d7e481dea0..eb4dc3375248 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -333,22 +333,22 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
-	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_TIMESTAMP |
-			     DEV_RX_OFFLOAD_RSS_HASH);
+	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
 	if (!config->mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
-		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (config->hw_csum)
-		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-			     DEV_RX_OFFLOAD_UDP_CKSUM |
-			     DEV_RX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -362,7 +362,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 uint64_t
 mlx5_get_rx_port_offloads(void)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	return offloads;
 }
@@ -694,7 +694,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				    dev->data->dev_conf.rxmode.offloads;
 
 		/* The offloads should be checked on rte_eth_dev layer. */
-		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
 			DRV_LOG(ERR, "port %u queue index %u split "
 				     "offload not configured",
@@ -1325,7 +1325,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
-	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
 	unsigned int max_rx_pktlen = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
@@ -1428,7 +1428,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
-	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
@@ -1472,7 +1472,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			config->mprq.stride_size_n : mprq_stride_size;
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_scatter_en =
-				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
+				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pktlen,
@@ -1487,7 +1487,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pktlen;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1548,9 +1548,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* Configure Rx timestamp. */
-	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
 	tmpl->rxq.timestamp_rx_flag = 0;
 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
 			&tmpl->rxq.timestamp_offset,
@@ -1559,11 +1559,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
@@ -1593,7 +1593,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.crc_present << 2);
 	/* Save port ID. */
 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
-		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = rx_seg[0].mp;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 93b4f517bb3e..65d91bdf67e2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -16,10 +16,10 @@
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
-	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
-	 DEV_TX_OFFLOAD_UDP_CKSUM | \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 
 /*
  * Compile time sanity check for vectorized functions.
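
Applications can probe for the renamed capability bits before relying on them;
a minimal sketch (port_id assumed valid):

	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;		/* port not available */
	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		printf("outer IPv4 checksum must be computed in software\n");
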
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index df671379e46d..12aeba60348a 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -523,36 +523,36 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	unsigned int diff = 0, olx = 0, i, m;
 
 	MLX5_ASSERT(priv);
-	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
 		olx |= MLX5_TXOFF_CONFIG_MULTI;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			   DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
 		/* We should support TCP Send Offload. */
 		olx |= MLX5_TXOFF_CONFIG_TSO;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support Software Parser for Tunnels. */
 		olx |= MLX5_TXOFF_CONFIG_SWP;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support IP/TCP/UDP Checksums. */
 		olx |= MLX5_TXOFF_CONFIG_CSUM;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
 		/* We should support VLAN insertion. */
 		olx |= MLX5_TXOFF_CONFIG_VLAN;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
 	    rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
 	    rte_mbuf_dynfield_lookup
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1f92250f5edd..02bb9307ae61 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -98,42 +98,42 @@ uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_VLAN_INSERT);
+	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	struct mlx5_dev_config *config = &priv->config;
 
 	if (config->hw_csum)
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (config->tx_pp)
-		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 	if (config->swp) {
 		if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->swp & MLX5_SW_PARSING_TSO_CAP)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso) {
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
-				offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 		}
 	}
 	if (!config->mprq.enabled)
-		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -801,17 +801,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int inlen_mode; /* Minimal required Inline data. */
 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
-	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					    DEV_TX_OFFLOAD_IP_TNL_TSO |
-					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	bool vlan_inline;
 	unsigned int temp;
 
 	txq_ctrl->txq.fast_free =
-		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
-		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
 		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
@@ -870,7 +870,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	 * tx_burst routine.
 	 */
 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
-	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
 		      !config->hw_vlan_insert;
 	/*
 	 * If there are few Tx queues it is prioritized
@@ -978,19 +978,19 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 						    MLX5_MAX_TSO_HEADER);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
-	   ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
-	   ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
 	   (config->swp  & MLX5_SW_PARSING_TSO_CAP))
 		txq_ctrl->txq.tunnel_en = 1;
-	txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
-				  DEV_TX_OFFLOAD_UDP_TNL_TSO) &
+	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
 				  txq_ctrl->txq.offloads) && (config->swp &
 				  MLX5_SW_PARSING_TSO_CAP)) |
-				((DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM &
+				((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
 				 txq_ctrl->txq.offloads) && (config->swp &
 				 MLX5_SW_PARSING_CSUM_CAP));
 }
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 60f97f2d2d1f..07792fc5d94f 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -142,9 +142,9 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-				       DEV_RX_OFFLOAD_VLAN_STRIP);
+				       RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (!priv->config.hw_vlan_strip) {
 			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 8937ec0d3037..7f7b545ca63a 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -485,8 +485,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	if (config->hw_padding) {
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
index 2a0288087357..10fe6d828ccd 100644
--- a/drivers/net/mvneta/mvneta_ethdev.c
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -114,7 +114,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 	struct mvneta_priv *priv = dev->data->dev_private;
 	struct neta_ppio_params *ppio_params;
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
 		MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		if (dev->data->nb_rx_queues > 1)
@@ -126,7 +126,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ppio_params = &priv->ppio_params;
@@ -151,10 +151,10 @@ static int
 mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 		   struct rte_eth_dev_info *info)
 {
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G;
 
 	info->max_rx_queues = MRVL_NETA_RXQ_MAX;
 	info->max_tx_queues = MRVL_NETA_TXQ_MAX;
@@ -503,28 +503,28 @@ mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 
 	neta_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
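
mvneta writes dev->data->dev_link directly; the equivalent using the ethdev
helper (as netvsc does further down), with the renamed constants, sketched for
a driver that already knows its state:

	struct rte_eth_link link = {
		.link_speed   = RTE_ETH_SPEED_NUM_1G,
		.link_duplex  = RTE_ETH_LINK_FULL_DUPLEX,
		.link_autoneg = RTE_ETH_LINK_AUTONEG,
		.link_status  = RTE_ETH_LINK_UP,
	};

	rte_eth_linkstatus_set(dev, &link);	/* atomic publish */
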
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
index 6428f9ff7931..64aadcffd85a 100644
--- a/drivers/net/mvneta/mvneta_ethdev.h
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -54,14 +54,14 @@
 #define MRVL_NETA_MRU_TO_MTU(mru)	((mru) - MRVL_NETA_HDRS_LEN)
 
 /** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Tx offloads capabilities */
-#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				    DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MVNETA_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS)
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 				PKT_TX_TCP_CKSUM | \
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
index 9836bb071a82..62d8aa586dae 100644
--- a/drivers/net/mvneta/mvneta_rxtx.c
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -734,7 +734,7 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	rxq->priv = priv;
 	rxq->mp = mp;
 	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
-			     DEV_RX_OFFLOAD_IPV4_CKSUM;
+			     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	rxq->size = desc;
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index a6458d2ce9b5..d0746b0d1215 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -58,15 +58,15 @@
 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
 
 /** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
-			  DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			  RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Port Tx offloads capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				  DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				  DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
-			  DEV_TX_OFFLOAD_MULTI_SEGS)
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 			      PKT_TX_TCP_CKSUM | \
@@ -442,14 +442,14 @@ mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
 
 	if (rss_conf->rss_hf == 0) {
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
-	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_2_TUPLE;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 1;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 0;
@@ -483,8 +483,8 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
-	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		return -EINVAL;
@@ -502,7 +502,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
@@ -524,7 +524,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 
 	if (dev->data->nb_rx_queues == 1 &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
 		priv->configured = 1;
@@ -623,7 +623,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		return 0;
 	}
 
@@ -644,7 +644,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -664,14 +664,14 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 	ret = pp2_ppio_disable(priv->ppio);
 	if (ret)
 		return ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -893,7 +893,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->all_multicast == 1)
 		mrvl_allmulticast_enable(dev);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = mrvl_populate_vlan_table(dev, 1);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to populate VLAN table");
@@ -929,11 +929,11 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 		priv->flow_ctrl = 0;
 	}
 
-	if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 		ret = mrvl_dev_set_link_up(dev);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to set link up");
-			dev->data->dev_link.link_status = ETH_LINK_DOWN;
+			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 			goto out;
 		}
 	}
@@ -1202,30 +1202,30 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case SPEED_10000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 	pp2_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -1709,11 +1709,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
 	struct mrvl_priv *priv = dev->data->dev_private;
 
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G |
-			   ETH_LINK_SPEED_10G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G |
+			   RTE_ETH_LINK_SPEED_10G;
 
 	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
 	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
@@ -1733,9 +1733,9 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 	info->tx_offload_capa = MRVL_TX_OFFLOADS;
 	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
 
-	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-				       ETH_RSS_NONFRAG_IPV4_TCP |
-				       ETH_RSS_NONFRAG_IPV4_UDP;
+	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	/* By default packets are dropped if no descriptors are available */
 	info->default_rxconf.rx_drop_en = 1;
@@ -1864,13 +1864,13 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
 		return -ENOTSUP;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = mrvl_populate_vlan_table(dev, 1);
 		else
 			ret = mrvl_populate_vlan_table(dev, 0);
@@ -1879,7 +1879,7 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			return ret;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
 		MRVL_LOG(ERR, "Extend VLAN not supported\n");
 		return -ENOTSUP;
 	}
@@ -2022,7 +2022,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -2182,7 +2182,7 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return ret;
 	}
 
-	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
 
 	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
 	if (ret) {
@@ -2191,10 +2191,10 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	if (en) {
-		if (fc_conf->mode == RTE_FC_NONE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+		if (fc_conf->mode == RTE_ETH_FC_NONE)
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 	}
 
 	return 0;
@@ -2240,19 +2240,19 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		rx_en = 1;
 		tx_en = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		rx_en = 0;
 		tx_en = 1;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		rx_en = 1;
 		tx_en = 0;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		rx_en = 0;
 		tx_en = 0;
 		break;
@@ -2329,11 +2329,11 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hash_type == PP2_PPIO_HASH_T_NONE)
 		rss_conf->rss_hf = 0;
 	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
-		rss_conf->rss_hf = ETH_RSS_IPV4;
+		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	return 0;
 }
@@ -3152,7 +3152,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 	eth_dev->dev_ops = &mrvl_ops;
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_eth_dev_probing_finish(eth_dev);
 	return 0;
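
One distinction worth keeping in mind across these hunks: RTE_ETH_LINK_SPEED_*
values are one-hot capability bits (for speed_capa and dev_conf link_speeds),
while RTE_ETH_SPEED_NUM_* are plain Mb/s values (for link_speed). Sketch:

	uint32_t capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;

	if (capa & RTE_ETH_LINK_SPEED_10G)
		link.link_speed = RTE_ETH_SPEED_NUM_10G;	/* 10000 Mb/s */
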
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 9e2a40597349..9c4ae80e7e16 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -40,16 +40,16 @@
 #include "hn_nvs.h"
 #include "ndis.h"
 
-#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
-			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-			    DEV_TX_OFFLOAD_TCP_TSO    | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS | \
-			    DEV_TX_OFFLOAD_VLAN_INSERT)
+#define HN_TX_OFFLOAD_CAPS (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			    RTE_ETH_TX_OFFLOAD_TCP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_TCP_TSO    | \
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+			    RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 
-#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
-			    DEV_RX_OFFLOAD_VLAN_STRIP | \
-			    DEV_RX_OFFLOAD_RSS_HASH)
+#define HN_RX_OFFLOAD_CAPS (RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+			    RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NETVSC_ARG_LATENCY "latency"
 #define NETVSC_ARG_RXBREAK "rx_copybreak"
@@ -238,21 +238,21 @@ hn_dev_link_update(struct rte_eth_dev *dev,
 	hn_rndis_get_linkspeed(hv);
 
 	link = (struct rte_eth_link) {
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_autoneg = ETH_LINK_SPEED_FIXED,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = RTE_ETH_LINK_SPEED_FIXED,
 		.link_speed = hv->link_speed / 10000,
 	};
 
 	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (old.link_status == link.link_status)
 		return 0;
 
 	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
-		     (link.link_status == ETH_LINK_UP) ? "up" : "down");
+		     (link.link_status == RTE_ETH_LINK_UP) ? "up" : "down");
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -263,14 +263,14 @@ static int hn_dev_info_get(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	int rc;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = HN_MAX_XFER_LEN;
 	dev_info->max_mac_addrs  = 1;
 
 	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
 	dev_info->flow_type_rss_offloads = hv->rss_offloads;
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 
 	dev_info->max_rx_queues = hv->max_queues;
 	dev_info->max_tx_queues = hv->max_queues;
@@ -306,8 +306,8 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -346,8 +346,8 @@ static int hn_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -362,17 +362,17 @@ static void hn_rss_hash_init(struct hn_data *hv,
 	/* Convert from DPDK RSS hash flags to NDIS hash flags */
 	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
 
-	if (rss_conf->rss_hf & ETH_RSS_IPV4)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 		hv->rss_hash |= NDIS_HASH_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 		hv->rss_hash |=  NDIS_HASH_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX)
 		hv->rss_hash |=  NDIS_HASH_IPV6_EX;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
 
 	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
@@ -427,22 +427,22 @@ static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	if (hv->rss_hash & NDIS_HASH_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_IPV4;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_IPV6;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_EX;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	return 0;
 }
@@ -686,8 +686,8 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
 	if (unsupported) {
@@ -705,7 +705,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	hv->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	err = hn_rndis_conf_offload(hv, txmode->offloads,
 				    rxmode->offloads);
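
[Editor's note: the mq_mode check above is a recurring idiom this patch touches in several drivers (netvsc, nfp, otx2): requesting RSS multi-queue implies delivering the computed hash, so the RSS_HASH Rx offload is forced on. A minimal sketch, illustrative only:]

#include <rte_ethdev.h>

/* Illustrative only: RSS multi-queue implies hash delivery. */
static void
apply_rss_hash_default(struct rte_eth_conf *conf)
{
	if (conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
}
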
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index 62ba39636cd8..1b63b27e0c3e 100644
--- a/drivers/net/netvsc/hn_rndis.c
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -710,15 +710,15 @@ hn_rndis_query_rsscaps(struct hn_data *hv,
 
 	hv->rss_offloads = 0;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
-		hv->rss_offloads |= ETH_RSS_IPV4
-			| ETH_RSS_NONFRAG_IPV4_TCP
-			| ETH_RSS_NONFRAG_IPV4_UDP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV4
+			| RTE_ETH_RSS_NONFRAG_IPV4_TCP
+			| RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
-		hv->rss_offloads |= ETH_RSS_IPV6
-			| ETH_RSS_NONFRAG_IPV6_TCP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6
+			| RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
-		hv->rss_offloads |= ETH_RSS_IPV6_EX
-			| ETH_RSS_IPV6_TCP_EX;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6_EX
+			| RTE_ETH_RSS_IPV6_TCP_EX;
 
 	/* Commit! */
 	*rxr_cnt0 = rxr_cnt;
@@ -800,7 +800,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -812,7 +812,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
 		    == NDIS_RXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
@@ -826,7 +826,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
 			params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -839,7 +839,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (rx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
 			params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
@@ -851,21 +851,21 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
 		    == NDIS_TXCSUM_CAP_IP4)
 			params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
 			goto unsupported;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
 			params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
 			params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
 		else
@@ -907,41 +907,41 @@ int hn_rndis_get_offload(struct hn_data *hv,
 		return error;
 	}
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
 	    == HN_NDIS_TXCSUM_CAP_IP4)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
 	    == HN_NDIS_TXCSUM_CAP_TCP4 &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
 	    == HN_NDIS_TXCSUM_CAP_TCP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 
 	if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
 	    (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
 	    == HN_NDIS_LSOV2_CAP_IP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				    DEV_RX_OFFLOAD_RSS_HASH;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 
 	return 0;
 }
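
[Editor's note: hn_rndis_get_offload() above only advertises a checksum offload when both the IPv4 and IPv6 hardware capability bits are set, since a single RTE_ETH_TX_OFFLOAD_*_CKSUM flag covers both families. A minimal sketch of that gate, illustrative only; the HW_CAP_* bits are hypothetical stand-ins for the NDIS capability flags:]

#include <stdint.h>

#include <rte_ethdev.h>

#define HW_CAP_TCP4 0x1 /* hypothetical hardware capability bits */
#define HW_CAP_TCP6 0x2

/* Illustrative only: advertise Tx TCP checksum offload just when the
 * hardware can do it for both IP families.
 */
static uint64_t
tx_capa_from_hw(uint32_t hw_caps)
{
	uint64_t capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	if ((hw_caps & (HW_CAP_TCP4 | HW_CAP_TCP6)) ==
			(HW_CAP_TCP4 | HW_CAP_TCP6))
		capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
	return capa;
}
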
diff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c
index 99d93ebf4667..3c39937816a4 100644
--- a/drivers/net/nfb/nfb_ethdev.c
+++ b/drivers/net/nfb/nfb_ethdev.c
@@ -200,7 +200,7 @@ nfb_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -268,26 +268,26 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 
 	status.speed = MAC_SPEED_UNKNOWN;
 
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_status = ETH_LINK_DOWN;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_SPEED_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_SPEED_FIXED;
 
 	if (internals->rxmac[0] != NULL) {
 		nc_rxmac_read_status(internals->rxmac[0], &status);
 
 		switch (status.speed) {
 		case MAC_SPEED_10G:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case MAC_SPEED_40G:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case MAC_SPEED_100G:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -296,7 +296,7 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 		nc_rxmac_read_status(internals->rxmac[i], &status);
 
 		if (status.enabled && status.link_up) {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			break;
 		}
 	}
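
[Editor's note: two distinct constant families are in play in the nfb hunks above: dev_info->speed_capa is a bitmask of RTE_ETH_LINK_SPEED_* capability flags, while link.link_speed holds a single RTE_ETH_SPEED_NUM_* value in Mbps. A minimal sketch of the distinction, illustrative only:]

#include <rte_ethdev.h>

/* Illustrative only: capability mask vs. current-speed number. */
static void
fill_speeds(struct rte_eth_dev_info *info, struct rte_eth_link *link)
{
	/* what the port can do: ORed RTE_ETH_LINK_SPEED_* bits */
	info->speed_capa = RTE_ETH_LINK_SPEED_10G |
			   RTE_ETH_LINK_SPEED_40G |
			   RTE_ETH_LINK_SPEED_100G;

	/* what the link runs at now: one RTE_ETH_SPEED_NUM_* value */
	link->link_speed = RTE_ETH_SPEED_NUM_100G;
}
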
diff --git a/drivers/net/nfb/nfb_rx.c b/drivers/net/nfb/nfb_rx.c
index 3ebb332ae46c..f76e2ba64621 100644
--- a/drivers/net/nfb/nfb_rx.c
+++ b/drivers/net/nfb/nfb_rx.c
@@ -42,7 +42,7 @@ nfb_check_timestamp(struct rte_devargs *devargs)
 	}
 	/* Timestamps are enabled when there is
 	 * key-value pair: enable_timestamp=1
-	 * TODO: timestamp should be enabled with DEV_RX_OFFLOAD_TIMESTAMP
+	 * TODO: timestamp should be enabled with RTE_ETH_RX_OFFLOAD_TIMESTAMP
 	 */
 	if (rte_kvargs_process(kvlist, TIMESTAMP_ARG,
 		timestamp_check_handler, NULL) < 0) {
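
[Editor's note: for the TODO above, the offload-driven alternative would key off the application's Rx configuration rather than a devarg. A minimal sketch, illustrative only and not part of this patch:]

#include <stdbool.h>

#include <rte_ethdev.h>

/* Illustrative only: enable timestamping when the application asked
 * for the RTE_ETH_RX_OFFLOAD_TIMESTAMP offload.
 */
static bool
timestamp_requested(const struct rte_eth_conf *conf)
{
	return (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0;
}
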
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 0003fd54dde5..3ea697c54462 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -160,8 +160,8 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
@@ -170,7 +170,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX mode */
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
 	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
 		PMD_INIT_LOG(INFO, "RSS not supported");
 		return -EINVAL;
@@ -359,19 +359,19 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 	}
 
 	hw->mtu = dev->data->mtu;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
 	/* L2 broadcast */
@@ -383,13 +383,13 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* TX checksum offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -397,7 +397,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	}
 
 	/* RX gather */
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
 	return ctrl;
@@ -485,14 +485,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	int ret;
 
 	static const uint32_t ls_to_ethtool[] = {
-		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
-		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
-		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
-		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
-		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
-		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
+		[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
+		[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
+		[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
+		[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
+		[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
 	};
 
 	PMD_DRV_LOG(DEBUG, "Link update");
@@ -504,15 +504,15 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 			 NFP_NET_CFG_STS_LINK_RATE_MASK;
 
 	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		link.link_speed = ls_to_ethtool[nn_link_status];
 
@@ -701,26 +701,26 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_UDP_CKSUM |
-					     DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-					     DEV_TX_OFFLOAD_UDP_CKSUM |
-					     DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -757,22 +757,22 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	};
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-		dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-						   ETH_RSS_NONFRAG_IPV4_TCP |
-						   ETH_RSS_NONFRAG_IPV4_UDP |
-						   ETH_RSS_IPV6 |
-						   ETH_RSS_NONFRAG_IPV6_TCP |
-						   ETH_RSS_NONFRAG_IPV6_UDP;
+		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+						   RTE_ETH_RSS_IPV6 |
+						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 	}
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -843,7 +843,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 	if (link.link_status)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 			    dev->data->port_id, link.link_speed,
-			    link.link_duplex == ETH_LINK_FULL_DUPLEX
+			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 			    ? "full-duplex" : "half-duplex");
 	else
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -973,12 +973,12 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	new_ctrl = 0;
 
 	/* Enable vlan strip if it is not configured yet */
-	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
 
 	/* Disable vlan strip just if it is configured */
-	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
@@ -1018,8 +1018,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1099,8 +1099,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1138,22 +1138,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1223,22 +1223,22 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	/* Propagate current RSS hash functions to caller */
 	rss_conf->rss_hf = rss_hf;
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 1169ea77a8c7..e08e594b04fe 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -141,7 +141,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index 62cb3536e0c9..817fe64dbceb 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -103,7 +103,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3b5c6615adfa..fc76b84b5b66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -409,7 +409,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	dev->data->dev_link.link_status = link_up;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 		negotiate = true;
 
 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
@@ -418,11 +418,11 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 
 	allowed_speeds = 0;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_1G;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_100M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_10M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
 
 	if (*link_speeds & ~allowed_speeds) {
 		PMD_INIT_LOG(ERR, "Invalid link setting");
@@ -430,14 +430,14 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = hw->mac.default_speeds;
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= NGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= NGBE_LINK_SPEED_100M_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= NGBE_LINK_SPEED_10M_FULL;
 	}
 
@@ -653,8 +653,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_10M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -682,11 +682,11 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			~ETH_LINK_SPEED_AUTONEG);
+			~RTE_ETH_LINK_SPEED_AUTONEG);
 
 	hw->mac.get_link_status = true;
 
@@ -699,8 +699,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -708,27 +708,27 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 
 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case NGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 
 	case NGBE_LINK_SPEED_10M_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		lan_speed = 0;
 		break;
 
 	case NGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		lan_speed = 1;
 		break;
 
 	case NGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		lan_speed = 2;
 		break;
 	}
@@ -912,11 +912,11 @@ ngbe_dev_link_status_print(struct rte_eth_dev *dev)
 
 	rte_eth_linkstatus_get(dev, &link);
 
-	if (link.link_status == ETH_LINK_UP) {
+	if (link.link_status == RTE_ETH_LINK_UP) {
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -956,7 +956,7 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
 		ngbe_dev_link_update(dev, 0);
 
 		/* likely to up */
-		if (link.link_status != ETH_LINK_UP)
+		if (link.link_status != RTE_ETH_LINK_UP)
 			/* handle it 1 sec later, wait it being stable */
 			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
 		/* likely to down */
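
[Editor's note: one subtlety in the ngbe hunks above: RTE_ETH_LINK_SPEED_AUTONEG is the value 0, so it must be compared with ==, never tested as a bit, while the specific speeds are real flag bits validated against what the MAC supports. A minimal sketch, illustrative only:]

#include <errno.h>
#include <stdint.h>

#include <rte_ethdev.h>

/* Illustrative only: validate configured link_speeds. */
static int
check_link_speeds(uint32_t link_speeds, uint32_t allowed)
{
	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		return 0; /* 0 == let the MAC negotiate */
	if (link_speeds & ~allowed)
		return -EINVAL; /* an unsupported rate was requested */
	return 0;
}
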
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 25b9e5b1ce1b..ca03469d0e6d 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -61,16 +61,16 @@ struct pmd_internals {
 	rte_spinlock_t rss_lock;
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[40];                /**< 40-byte hash key. */
 };
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
@@ -189,7 +189,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return -EINVAL;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -199,7 +199,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return 0;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -391,9 +391,9 @@ eth_rss_reta_update(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
 		internal->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -416,8 +416,8 @@ eth_rss_reta_query(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
 	}
@@ -548,8 +548,8 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	internals->port_id = eth_dev->data->port_id;
 	rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
-	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
-	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+	internals->flow_type_rss_offloads =  RTE_ETH_RSS_PROTO_MASK;
+	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;
 
 	rte_memcpy(internals->rss_key, default_rss_key, 40);
 
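
[Editor's note: the null PMD above sizes its RETA storage from the same two constants it later reports through reta_size, which keeps the table and its advertised size consistent by construction. A minimal sketch, illustrative only:]

#include <stdint.h>

#include <rte_ethdev.h>

/* Illustrative only: a 128-entry RETA stored as 64-entry groups. */
struct rss_state {
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			RTE_ETH_RETA_GROUP_SIZE];
};

static uint16_t
rss_reta_size(const struct rss_state *s)
{
	return RTE_DIM(s->reta_conf) * RTE_ETH_RETA_GROUP_SIZE; /* 128 */
}
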
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index f578123ed00b..5b8cbec67b5d 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -158,7 +158,7 @@ octeontx_link_status_print(struct rte_eth_dev *eth_dev,
 		octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
 			  (eth_dev->data->port_id),
 			  link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		octeontx_log_info("Port %d: Link Down",
@@ -171,38 +171,38 @@ octeontx_link_status_update(struct octeontx_nic *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	switch (nic->speed) {
 	case OCTEONTX_LINK_SPEED_SGMII:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_XAUI:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RXAUI:
 	case OCTEONTX_LINK_SPEED_10G_R:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case OCTEONTX_LINK_SPEED_QSGMII:
-		link->link_speed = ETH_SPEED_NUM_5G;
+		link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case OCTEONTX_LINK_SPEED_40G_R:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RESERVE1:
 	case OCTEONTX_LINK_SPEED_RESERVE2:
 	default:
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		octeontx_log_err("incorrect link speed %d", nic->speed);
 		break;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -355,20 +355,20 @@ octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= OCCTX_TX_MULTI_SEG_F;
 
 	return flags;
@@ -380,21 +380,21 @@ octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		flags |= OCCTX_RX_MULTI_SEG_F;
 		eth_dev->data->scattered_rx = 1;
 		/* If scatter mode is enabled, TX should also be in multi
 		 * seg mode, else memory leak will occur
 		 */
-		nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	return flags;
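
[Editor's note: octeontx_rx_offload_flags() above also shows the scatter/multi-seg coupling this driver enforces: enabling scattered Rx silently turns on multi-segment Tx to avoid the mbuf leak noted in the comment. A minimal sketch of that fold, illustrative only; the internal flag is a hypothetical stand-in for OCCTX_RX_MULTI_SEG_F:]

#include <stdint.h>

#include <rte_ethdev.h>

#define PMD_RX_MULTI_SEG_F (1 << 0) /* hypothetical internal flag */

/* Illustrative only: fold public RTE_ETH_* offload bits into a compact
 * internal flag word, keeping Rx scatter and Tx multi-seg in sync.
 */
static uint16_t
derive_rx_flags(uint64_t rx_offloads, uint64_t *tx_offloads)
{
	uint16_t flags = 0;

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
		flags |= PMD_RX_MULTI_SEG_F;
		/* scattered Rx requires multi-segment Tx as well */
		*tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	}
	return flags;
}
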
@@ -423,18 +423,18 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+		txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		octeontx_log_err("setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -530,13 +530,13 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		octeontx_log_err("Scatter mode is disabled");
 		return -EINVAL;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -571,7 +571,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
 
 	/* Setup scatter mode if needed by jumbo */
 	if (data->mtu > buffsz) {
-		nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
 		nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
 	}
@@ -843,10 +843,10 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_40G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_40G;
 
 	/* Min/Max MTU supported */
 	dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
@@ -1356,7 +1356,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	nic->ev_ports = 1;
 	nic->print_flag = -1;
 
-	data->dev_link.link_status = ETH_LINK_DOWN;
+	data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	data->dev_started = 0;
 	data->promiscuous = 0;
 	data->all_multicast = 0;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 3a02824e3948..c493fa7a03ed 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -55,23 +55,23 @@
 #define OCCTX_MAX_MTU		(OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
 
 #define OCTEONTX_RX_OFFLOADS		(				   \
-					 DEV_RX_OFFLOAD_CHECKSUM	 | \
-					 DEV_RX_OFFLOAD_SCTP_CKSUM       | \
-					 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_RX_OFFLOAD_SCATTER	         | \
-					 DEV_RX_OFFLOAD_SCATTER		 | \
-					 DEV_RX_OFFLOAD_VLAN_FILTER)
+					 RTE_ETH_RX_OFFLOAD_CHECKSUM	 | \
+					 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER	         | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER		 | \
+					 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 
 #define OCTEONTX_TX_OFFLOADS		(				   \
-					 DEV_TX_OFFLOAD_MBUF_FAST_FREE	 | \
-					 DEV_TX_OFFLOAD_MT_LOCKFREE	 | \
-					 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_IPV4_CKSUM	 | \
-					 DEV_TX_OFFLOAD_TCP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_SCTP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_MULTI_SEGS)
+					 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	 | \
+					 RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	 | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_TCP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_UDP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	 | \
+					 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 static inline struct octeontx_nic *
 octeontx_pmd_priv(struct rte_eth_dev *dev)
diff --git a/drivers/net/octeontx/octeontx_ethdev_ops.c b/drivers/net/octeontx/octeontx_ethdev_ops.c
index dbe13ce3826b..6ec2b71b0672 100644
--- a/drivers/net/octeontx/octeontx_ethdev_ops.c
+++ b/drivers/net/octeontx/octeontx_ethdev_ops.c
@@ -43,20 +43,20 @@ octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			rc = octeontx_vlan_hw_filter(nic, true);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
 		} else {
 			rc = octeontx_vlan_hw_filter(nic, false);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
 		}
 	}
@@ -139,7 +139,7 @@ octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
 
 	TAILQ_INIT(&nic->vlan_info.fltr_tbl);
 
-	rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	rc = octeontx_dev_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 	if (rc)
 		octeontx_log_err("Failed to set vlan offload rc=%d", rc);
 
@@ -219,13 +219,13 @@ octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
 		return rc;
 
 	if (conf.rx_pause && conf.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (conf.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (conf.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	/* low_water & high_water values are in Bytes */
 	fc_conf->low_water = conf.low_water;
@@ -272,10 +272,10 @@ octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	conf.high_water = fc_conf->high_water;
 	conf.low_water = fc_conf->low_water;
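
[Editor's note: both octeontx above and otx2 below translate between the rx_pause/tx_pause pair and the four RTE_ETH_FC_* modes; the mapping is symmetric in the two directions. A minimal sketch, illustrative only:]

#include <stdbool.h>

#include <rte_ethdev.h>

/* Illustrative only: pause-pair to RTE_ETH_FC_* mode. */
static enum rte_eth_fc_mode
fc_mode_from_pause(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return RTE_ETH_FC_FULL;
	if (rx_pause)
		return RTE_ETH_FC_RX_PAUSE;
	if (tx_pause)
		return RTE_ETH_FC_TX_PAUSE;
	return RTE_ETH_FC_NONE;
}
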
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 9c5d748e8575..72da8856bd86 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -21,7 +21,7 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 
 	if (otx2_dev_is_vf(dev) ||
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -33,10 +33,10 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 
 	/* TSO not supported for earlier chip revisions */
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
-		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	return capa;
 }
 
@@ -66,8 +66,8 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	req->npa_func = otx2_npa_pf_func_get();
 	req->sso_func = otx2_sso_pf_func_get();
 	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
@@ -373,7 +373,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 
 	aq->rq.sso_ena = 0;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		aq->rq.ipsech_ena = 1;
 
 	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
@@ -665,7 +665,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev)) {
 		rc = otx2_nix_raw_clock_tsc_conv(dev);
 		if (rc) {
@@ -692,7 +692,7 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -707,29 +707,29 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-			(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+			(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_QINQ_STRIP))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
 		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	if (!dev->ptype_disable)
@@ -768,43 +768,43 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -914,8 +914,8 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 		/* Setting up the rx[tx]_offload_flags due to change
 		 * in rx[tx]_offloads.
@@ -1848,21 +1848,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
 
 	if (otx2_dev_is_Ax(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		otx2_err("Outer IP and SCTP checksum unsupported");
 		goto fail_configure;
 	}
@@ -2235,7 +2235,7 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled in PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev))
 		otx2_nix_timesync_enable(eth_dev);
 	else
@@ -2563,8 +2563,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	rc = otx2_eth_sec_ctx_create(eth_dev);
 	if (rc)
 		goto free_mac_addrs;
-	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 
 	/* Initialize rte-flow */
 	rc = otx2_flow_init(dev);
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 4557a0ee1945..a5282c6c1231 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -117,43 +117,43 @@
 #define CQ_TIMER_THRESH_DEFAULT	0xAULL /* ~1usec i.e (0xA * 100nsec) */
 #define CQ_TIMER_THRESH_MAX     255
 
-#define NIX_RSS_L3_L4_SRC_DST  (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
-				| ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define NIX_RSS_L3_L4_SRC_DST  (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY \
+				| RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
-#define NIX_RSS_OFFLOAD		(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
-				 ETH_RSS_TCP | ETH_RSS_SCTP | \
-				 ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
-				 NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | \
-				 ETH_RSS_C_VLAN)
+#define NIX_RSS_OFFLOAD		(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |\
+				 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | \
+				 RTE_ETH_RSS_TUNNEL | RTE_ETH_RSS_L2_PAYLOAD | \
+				 NIX_RSS_L3_L4_SRC_DST | RTE_ETH_RSS_LEVEL_MASK | \
+				 RTE_ETH_RSS_C_VLAN)
 
 #define NIX_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE	| \
-	DEV_TX_OFFLOAD_MT_LOCKFREE	| \
-	DEV_TX_OFFLOAD_VLAN_INSERT	| \
-	DEV_TX_OFFLOAD_QINQ_INSERT	| \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
-	DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_CKSUM	| \
-	DEV_TX_OFFLOAD_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_TSO		| \
-	DEV_TX_OFFLOAD_VXLAN_TNL_TSO    | \
-	DEV_TX_OFFLOAD_GENEVE_TNL_TSO   | \
-	DEV_TX_OFFLOAD_GRE_TNL_TSO	| \
-	DEV_TX_OFFLOAD_MULTI_SEGS	| \
-	DEV_TX_OFFLOAD_IPV4_CKSUM)
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	| \
+	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	| \
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_QINQ_INSERT	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO		| \
+	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    | \
+	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   | \
+	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	| \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS	| \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define NIX_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM		| \
-	DEV_RX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_RX_OFFLOAD_SCATTER		| \
-	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_RX_OFFLOAD_VLAN_STRIP	| \
-	DEV_RX_OFFLOAD_VLAN_FILTER	| \
-	DEV_RX_OFFLOAD_QINQ_STRIP	| \
-	DEV_RX_OFFLOAD_TIMESTAMP	| \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM		| \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_RX_OFFLOAD_SCATTER		| \
+	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER	| \
+	RTE_ETH_RX_OFFLOAD_QINQ_STRIP	| \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP	| \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NIX_DEFAULT_RSS_CTX_GROUP  0
 #define NIX_DEFAULT_RSS_MCAM_IDX  -1
diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c b/drivers/net/octeontx2/otx2_ethdev_devargs.c
index 83f905315b38..60bf6c3f5f05 100644
--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -49,12 +49,12 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
-		val = ETH_RSS_RETA_SIZE_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
-		val = ETH_RSS_RETA_SIZE_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
-		val = ETH_RSS_RETA_SIZE_256;
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
+		val = RTE_ETH_RSS_RETA_SIZE_64;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
+		val = RTE_ETH_RSS_RETA_SIZE_128;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
+		val = RTE_ETH_RSS_RETA_SIZE_256;
 	else
 		val = NIX_RSS_RETA_SIZE;
 
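
[Editor's note: parse_reta_size() above rounds a devarg value up to the next standard bucket, falling back to the driver default above 256. A minimal sketch of the rounding step alone, illustrative only; the >256 fallback is driver-specific and omitted here:]

#include <rte_ethdev.h>

/* Illustrative only: round a requested RETA size up to the next
 * standard RTE_ETH_RSS_RETA_SIZE_* bucket, capping at 256.
 */
static int
round_reta_size(int val)
{
	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
		return RTE_ETH_RSS_RETA_SIZE_64;
	if (val <= RTE_ETH_RSS_RETA_SIZE_128)
		return RTE_ETH_RSS_RETA_SIZE_128;
	return RTE_ETH_RSS_RETA_SIZE_256;
}
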
diff --git a/drivers/net/octeontx2/otx2_ethdev_ops.c b/drivers/net/octeontx2/otx2_ethdev_ops.c
index 22a8af5cba45..d5caaa326a5a 100644
--- a/drivers/net/octeontx2/otx2_ethdev_ops.c
+++ b/drivers/net/octeontx2/otx2_ethdev_ops.c
@@ -26,11 +26,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER))
 		return -EINVAL;
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -568,17 +568,17 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	};
 
 	/* Auto negotiation disabled */
-	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
-		devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+		devinfo->speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G;
 
 		/* 50G and 100G to be supported for board version C0
 		 * and above.
 		 */
 		if (!otx2_dev_is_Ax(dev))
-			devinfo->speed_capa |= ETH_LINK_SPEED_50G |
-					       ETH_LINK_SPEED_100G;
+			devinfo->speed_capa |= RTE_ETH_LINK_SPEED_50G |
+					       RTE_ETH_LINK_SPEED_100G;
 	}
 
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
diff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c
index 7bd1ed6da043..4d40184de46d 100644
--- a/drivers/net/octeontx2/otx2_ethdev_sec.c
+++ b/drivers/net/octeontx2/otx2_ethdev_sec.c
@@ -869,8 +869,8 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
 			 !RTE_IS_POWER_OF_2(sa_width));
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return 0;
 
 	if (rte_security_dynfield_register() < 0)
@@ -912,8 +912,8 @@ otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
 	uint16_t port = eth_dev->data->port_id;
 	char name[RTE_MEMZONE_NAMESIZE];
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	lookup_mem_sa_tbl_clear(eth_dev);
diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c
index 6df0732189eb..1d0fe4e950d4 100644
--- a/drivers/net/octeontx2/otx2_flow.c
+++ b/drivers/net/octeontx2/otx2_flow.c
@@ -625,7 +625,7 @@ otx2_flow_create(struct rte_eth_dev *dev,
 		goto err_exit;
 	}
 
-	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rc = flow_update_sec_tt(dev, actions);
 		if (rc != 0) {
 			rte_flow_error_set(error, EIO,
diff --git a/drivers/net/octeontx2/otx2_flow_ctrl.c b/drivers/net/octeontx2/otx2_flow_ctrl.c
index 76bf48100183..071740de86a7 100644
--- a/drivers/net/octeontx2/otx2_flow_ctrl.c
+++ b/drivers/net/octeontx2/otx2_flow_ctrl.c
@@ -54,7 +54,7 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 	int rc;
 
 	if (otx2_dev_is_lbk(dev)) {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		return 0;
 	}
 
@@ -66,13 +66,13 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		goto done;
 
 	if (rsp->rx_pause && rsp->tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rsp->rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (rsp->tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 done:
 	return rc;
@@ -159,10 +159,10 @@ otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -212,11 +212,11 @@ otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (otx2_dev_is_Ax(dev) &&
 	    (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
-	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_conf.mode == RTE_ETH_FC_FULL || fc_conf.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_conf.mode =
-				(fc_conf.mode == RTE_FC_FULL ||
-				fc_conf.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_conf.mode == RTE_ETH_FC_FULL ||
+				fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
@@ -234,7 +234,7 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		return 0;
 
 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -242,10 +242,10 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
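The rx_pause/tx_pause to fc mode translation written out twice in this file is the standard four-way mapping behind struct rte_eth_fc_conf. A minimal sketch under the new names (the helper name and placement are hypothetical, not part of the driver):

#include <stdbool.h>
#include <rte_ethdev.h>

/* Hypothetical helper: map pause flags to an ethdev flow-control mode. */
static enum rte_eth_fc_mode
fc_mode_from_pause(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return RTE_ETH_FC_FULL;
	if (rx_pause)
		return RTE_ETH_FC_RX_PAUSE;
	if (tx_pause)
		return RTE_ETH_FC_TX_PAUSE;
	return RTE_ETH_FC_NONE;
}
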
diff --git a/drivers/net/octeontx2/otx2_flow_parse.c b/drivers/net/octeontx2/otx2_flow_parse.c
index 79b92fda8a4a..91267bbb8182 100644
--- a/drivers/net/octeontx2/otx2_flow_parse.c
+++ b/drivers/net/octeontx2/otx2_flow_parse.c
@@ -852,7 +852,7 @@ parse_rss_action(struct rte_eth_dev *dev,
 					  attr, "No support of RSS in egress");
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  act, "multi-queue mode is disabled");
@@ -1186,7 +1186,7 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev,
 		 *FLOW_KEY_ALG index. So, till we update the action with
 		 *flow_key_alg index, set the action to drop.
 		 */
-		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 			flow->npc_action = NIX_RX_ACTIONOP_DROP;
 		else
 			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
diff --git a/drivers/net/octeontx2/otx2_link.c b/drivers/net/octeontx2/otx2_link.c
index 81dd6243b977..8f5d0eed92b6 100644
--- a/drivers/net/octeontx2/otx2_link.c
+++ b/drivers/net/octeontx2/otx2_link.c
@@ -41,7 +41,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		otx2_info("Port %d: Link Up - speed %u Mbps - %s",
 			  (int)(eth_dev->data->port_id),
 			  (uint32_t)link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
@@ -92,7 +92,7 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 
 	eth_link.link_status = link->link_up;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	otx2_dev->speed = link->speed;
@@ -111,10 +111,10 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 static int
 lbk_link_update(struct rte_eth_link *link)
 {
-	link->link_status = ETH_LINK_UP;
-	link->link_speed = ETH_SPEED_NUM_100G;
-	link->link_autoneg = ETH_LINK_FIXED;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = RTE_ETH_LINK_UP;
+	link->link_speed = RTE_ETH_SPEED_NUM_100G;
+	link->link_autoneg = RTE_ETH_LINK_FIXED;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	return 0;
 }
 
@@ -131,7 +131,7 @@ cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
 
 	link->link_status = rsp->link_info.link_up;
 	link->link_speed = rsp->link_info.speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (rsp->link_info.full_duplex)
 		link->link_duplex = rsp->link_info.full_duplex;
@@ -233,22 +233,22 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 
 	/* 50G and 100G to be supported for board version C0 and above */
 	if (!otx2_dev_is_Ax(dev)) {
-		if (link_speeds & ETH_LINK_SPEED_100G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 			link_speed = 100000;
-		if (link_speeds & ETH_LINK_SPEED_50G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 			link_speed = 50000;
 	}
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed = 40000;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed = 25000;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed = 20000;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed = 10000;
-	if (link_speeds & ETH_LINK_SPEED_5G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 		link_speed = 5000;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed = 1000;
 
 	return link_speed;
@@ -257,11 +257,11 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 static inline uint8_t
 nix_parse_eth_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-			(link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+			(link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 int
@@ -279,7 +279,7 @@ otx2_apply_link_speed(struct rte_eth_dev *eth_dev)
 	cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
 	if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
 		cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
-		cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+		cfg.an = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		return cgx_change_mode(dev, &cfg);
 	}
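As a usage sketch (not part of the patch): the RTE_ETH_LINK_SPEED_* bits consumed by nix_parse_link_speeds() are capability flags the application sets in its port configuration, e.g. to pin the link at 10G with autonegotiation off:

#include <rte_ethdev.h>

static struct rte_eth_conf conf = {
	.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G,
};
/* nix_parse_link_speeds() maps this to 10000 (Mbps), and cfg.an ends
 * up 0 because RTE_ETH_LINK_SPEED_FIXED is set. */
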
diff --git a/drivers/net/octeontx2/otx2_mcast.c b/drivers/net/octeontx2/otx2_mcast.c
index f84aa1bf570c..b9c63ad3bc21 100644
--- a/drivers/net/octeontx2/otx2_mcast.c
+++ b/drivers/net/octeontx2/otx2_mcast.c
@@ -100,7 +100,7 @@ nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
 
 		action = NIX_RX_ACTIONOP_UCAST;
 
-		if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+		if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 			action = NIX_RX_ACTIONOP_RSS;
 			action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 		}
diff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c
index 91e5c0f6bd11..abb213058792 100644
--- a/drivers/net/octeontx2/otx2_ptp.c
+++ b/drivers/net/octeontx2/otx2_ptp.c
@@ -250,7 +250,7 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	/* System time should be already on by default */
 	nix_start_timecounters(eth_dev);
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
@@ -287,7 +287,7 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 	if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
 		return -EINVAL;
 
-	dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
 
diff --git a/drivers/net/octeontx2/otx2_rss.c b/drivers/net/octeontx2/otx2_rss.c
index 7dbe5f69ae65..68cef1caa394 100644
--- a/drivers/net/octeontx2/otx2_rss.c
+++ b/drivers/net/octeontx2/otx2_rss.c
@@ -85,8 +85,8 @@ otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				rss->ind_tbl[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -118,8 +118,8 @@ otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = rss->ind_tbl[j];
 	}
@@ -178,23 +178,23 @@ rss_get_key(struct otx2_eth_dev *dev, uint8_t *key)
 }
 
 #define RSS_IPV4_ENABLE ( \
-			  ETH_RSS_IPV4 | \
-			  ETH_RSS_FRAG_IPV4 | \
-			  ETH_RSS_NONFRAG_IPV4_UDP | \
-			  ETH_RSS_NONFRAG_IPV4_TCP | \
-			  ETH_RSS_NONFRAG_IPV4_SCTP)
+			  RTE_ETH_RSS_IPV4 | \
+			  RTE_ETH_RSS_FRAG_IPV4 | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE ( \
-			  ETH_RSS_IPV6 | \
-			  ETH_RSS_FRAG_IPV6 | \
-			  ETH_RSS_NONFRAG_IPV6_UDP | \
-			  ETH_RSS_NONFRAG_IPV6_TCP | \
-			  ETH_RSS_NONFRAG_IPV6_SCTP)
+			  RTE_ETH_RSS_IPV6 | \
+			  RTE_ETH_RSS_FRAG_IPV6 | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE ( \
-			     ETH_RSS_IPV6_EX | \
-			     ETH_RSS_IPV6_TCP_EX | \
-			     ETH_RSS_IPV6_UDP_EX)
+			     RTE_ETH_RSS_IPV6_EX | \
+			     RTE_ETH_RSS_IPV6_TCP_EX | \
+			     RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS   3
 
@@ -233,24 +233,24 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->rss_info.nix_rss = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -259,34 +259,34 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -343,7 +343,7 @@ otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 		otx2_nix_rss_set_key(dev, rss_conf->rss_key,
 				     (uint32_t)rss_conf->rss_key_len);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
@@ -390,7 +390,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	int rc;
 
 	/* Skip further configuration if selected mode is not RSS */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS || !qcnt)
 		return 0;
 
 	/* Update default RSS key and cfg */
@@ -408,7 +408,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	}
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
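RTE_ETH_RSS_LEVEL() extracts a two-bit encapsulation level from rss_hf (0 = PMD default, 1 = outermost, 2 = innermost), which is why the code above subtracts one before indexing flow_key_type[]. A minimal application-side sketch (function name is ours):

#include <rte_ethdev.h>

static uint32_t
requested_rss_level(void)
{
	uint64_t rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
			  RTE_ETH_RSS_LEVEL_INNERMOST;

	return RTE_ETH_RSS_LEVEL(rss_hf);	/* 2: innermost */
}
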
diff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c
index ffeade5952dc..986902287b67 100644
--- a/drivers/net/octeontx2/otx2_rx.c
+++ b/drivers/net/octeontx2/otx2_rx.c
@@ -414,12 +414,12 @@ NIX_RX_FASTPATH_MODES
 	/* For PTP enabled, scalar rx function should be chosen as most of the
 	 * PTP apps are implemented to rx burst 1 pkt.
 	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		pick_rx_func(eth_dev, nix_eth_rx_burst);
 	else
 		pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 
 	/* Copy multi seg version with no offload for tear down sequence */
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index ff299f00b913..c60190074926 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -1070,7 +1070,7 @@ NIX_TX_FASTPATH_MODES
 	else
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 
 	rte_mb();
diff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c
index f5161e17a16d..cce643b7b51d 100644
--- a/drivers/net/octeontx2/otx2_vlan.c
+++ b/drivers/net/octeontx2/otx2_vlan.c
@@ -50,7 +50,7 @@ nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
 
 	action = NIX_RX_ACTIONOP_UCAST;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		action = NIX_RX_ACTIONOP_RSS;
 		action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 	}
@@ -99,7 +99,7 @@ nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
 	 * Take offset from LA since in case of untagged packet,
 	 * lbptr is zero.
 	 */
-	if (type == ETH_VLAN_TYPE_OUTER) {
+	if (type == RTE_ETH_VLAN_TYPE_OUTER) {
 		vtag_action.act.vtag0_def = vtag_index;
 		vtag_action.act.vtag0_lid = NPC_LID_LA;
 		vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
@@ -413,7 +413,7 @@ nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
 		if (vlan->strip_on ||
 		    (vlan->qinq_on && !vlan->qinq_before_def)) {
 			if (eth_dev->data->dev_conf.rxmode.mq_mode ==
-								ETH_MQ_RX_RSS)
+								RTE_ETH_MQ_RX_RSS)
 				vlan->def_rx_mcam_ent.action |=
 							NIX_RX_ACTIONOP_RSS;
 			else
@@ -717,48 +717,48 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
 	rxmode = &eth_dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, true);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, false);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, true, 0);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, false, 0);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
 		if (!dev->vlan_info.qinq_on) {
-			offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, true);
 			if (rc)
 				goto done;
 		}
 	} else {
 		if (dev->vlan_info.qinq_on) {
-			offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, false);
 			if (rc)
 				goto done;
 		}
 	}
 
-	if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-			DEV_RX_OFFLOAD_QINQ_STRIP)) {
+	if (offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) {
 		dev->rx_offloads |= offloads;
 		dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 		otx2_eth_set_rx_function(eth_dev);
@@ -780,7 +780,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
 
 	tpid_cfg->tpid = tpid;
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
 	else
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
@@ -789,7 +789,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	if (rc)
 		return rc;
 
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		dev->vlan_info.outer_vlan_tpid = tpid;
 	else
 		dev->vlan_info.inner_vlan_tpid = tpid;
@@ -864,7 +864,7 @@ otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev,       uint16_t vlan_id, int on)
 		vlan->outer_vlan_idx = 0;
 	}
 
-	rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+	rc = nix_vlan_handle_default_tx_entry(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					      vtag_index, on);
 	if (rc < 0) {
 		printf("Default tx entry failed with rc %d\n", rc);
@@ -986,12 +986,12 @@ otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
 	} else {
 		/* Reinstall all mcam entries now if filter offload is set */
 		if (eth_dev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			nix_vlan_reinstall_vlan_filters(eth_dev);
 	}
 
 	mask =
-	    ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	    RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	rc = otx2_nix_vlan_offload_set(eth_dev, mask);
 	if (rc) {
 		otx2_err("Failed to set vlan offload rc=%d", rc);
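otx2_nix_vlan_offload_set() is the driver callback behind rte_eth_dev_set_vlan_offload(); the equivalent application-side call under the new names is, schematically (port_id assumed valid):

#include <rte_ethdev.h>

/* Sketch: enable VLAN strip and filter offloads on a port. */
static int
enable_vlan_offloads(uint16_t port_id)
{
	int mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;

	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
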
diff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c
index 698d22e22685..74dc36a17648 100644
--- a/drivers/net/octeontx_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeontx_ep/otx_ep_ethdev.c
@@ -33,14 +33,14 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	otx_epvf = OTX_EP_DEV(eth_dev);
 
-	devinfo->speed_capa = ETH_LINK_SPEED_10G;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
 
 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
 	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
-	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
 
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.c b/drivers/net/octeontx_ep/otx_ep_rxtx.c
index aa4dcd33cc79..9338b30672ec 100644
--- a/drivers/net/octeontx_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeontx_ep/otx_ep_rxtx.c
@@ -563,7 +563,7 @@ otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
@@ -697,7 +697,7 @@ otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)
@@ -954,7 +954,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
 	droq_pkt->l4_len = hdr_lens.l4_len;
 
 	if (droq_pkt->nb_segs > 1 &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index d695c5eef7b0..ec29fd6bc53c 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -136,10 +136,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
@@ -659,7 +659,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -714,7 +714,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
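For reference, the renamed link constants as an application reads them back (hedged sketch; port_id assumed initialized):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: print link state using the renamed constants. */
static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: %u Mbps, %s\n", port_id, link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
	else
		printf("port %u: down\n", port_id);
}
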
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 4cc002ee8fab..047010e15ed0 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -22,15 +22,15 @@ struct pfe_vdev_init_params {
 static struct pfe *g_pfe;
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -601,9 +601,9 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	}
 
 	link.link_status = lstatus;
-	link.link_speed = ETH_LINK_SPEED_1G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_speed = RTE_ETH_SPEED_NUM_1G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	pfe_eth_atomic_write_link_status(dev, &link);
 
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 27f6932dc74e..c907d7fd8312 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -342,9 +342,9 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
 	}
 
 	use_tx_offload = !!(tx_offloads &
-			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
-			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
-			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+			    (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
 
 	if (use_tx_offload) {
 		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
@@ -1002,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			(void)qede_vlan_stripping(eth_dev, 1);
 		else
 			(void)qede_vlan_stripping(eth_dev, 0);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN filtering kicks in when a VLAN is added */
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			qede_vlan_filter_set(eth_dev, 0, 1);
 		} else {
 			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1022,7 +1022,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 				 * enabled
 				 */
 				eth_dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_VLAN_FILTER;
+						RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			} else {
 				qede_vlan_filter_set(eth_dev, 0, 0);
 			}
@@ -1069,11 +1069,11 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	/* Configure default RETA */
 	memset(reta_conf, 0, sizeof(reta_conf));
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-		id = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		id = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		q = i % QEDE_RSS_COUNT(eth_dev);
 		reta_conf[id].reta[pos] = q;
 	}
@@ -1112,12 +1112,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure TPA parameters */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (qede_enable_tpa(eth_dev, true))
 			return -EINVAL;
 		/* Enable scatter mode for LRO */
 		if (!eth_dev->data->scattered_rx)
-			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	}
 
 	/* Start queues */
@@ -1132,7 +1132,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	 * Also, we would like to retain similar behavior in PF case, so we
 	 * don't do PF/VF specific check here.
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1272,8 +1272,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
@@ -1291,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_NOTICE(edev, false,
 			  "Invalid devargs supplied, requested change will not take effect\n");
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+	if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+	      rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
 		DP_ERR(edev, "Unsupported multi-queue mode\n");
 		return -ENOTSUP;
 	}
@@ -1312,7 +1312,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 			return -ENOMEM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 
 	if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1321,8 +1321,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = eth_dev->data->mtu;
 
 	/* Enable VLAN offloads by default */
-	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-					     ETH_VLAN_FILTER_MASK);
+	ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK  |
+					     RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1385,34 +1385,34 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_RX_OFFLOAD_UDP_CKSUM	|
-				     DEV_RX_OFFLOAD_TCP_CKSUM	|
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_LRO	|
-				     DEV_RX_OFFLOAD_KEEP_CRC    |
-				     DEV_RX_OFFLOAD_SCATTER	|
-				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP  |
-				     DEV_RX_OFFLOAD_RSS_HASH);
+	dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO	|
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+				     RTE_ETH_RX_OFFLOAD_SCATTER	|
+				     RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
 	 * to both at port and queue levels.
 	 */
-	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
-				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_TX_OFFLOAD_UDP_CKSUM	|
-				     DEV_TX_OFFLOAD_TCP_CKSUM	|
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_MULTI_SEGS  |
-				     DEV_TX_OFFLOAD_TCP_TSO	|
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT	|
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO	|
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	};
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1424,17 +1424,17 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-		speed_cap |= ETH_LINK_SPEED_1G;
+		speed_cap |= RTE_ETH_LINK_SPEED_1G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-		speed_cap |= ETH_LINK_SPEED_10G;
+		speed_cap |= RTE_ETH_LINK_SPEED_10G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-		speed_cap |= ETH_LINK_SPEED_25G;
+		speed_cap |= RTE_ETH_LINK_SPEED_25G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-		speed_cap |= ETH_LINK_SPEED_40G;
+		speed_cap |= RTE_ETH_LINK_SPEED_40G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-		speed_cap |= ETH_LINK_SPEED_50G;
+		speed_cap |= RTE_ETH_LINK_SPEED_50G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-		speed_cap |= ETH_LINK_SPEED_100G;
+		speed_cap |= RTE_ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
 
 	return 0;
@@ -1461,10 +1461,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	/* Link Mode */
 	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
-		link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case QEDE_DUPLEX_FULL:
-		link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case QEDE_DUPLEX_UNKNOWN:
 	default:
@@ -1473,11 +1473,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	/* AN */
 	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+			     RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
 		link.link_speed, link.link_duplex,
@@ -2012,12 +2012,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
-	if (fc_conf->mode == RTE_FC_FULL)
+	if (fc_conf->mode == RTE_ETH_FC_FULL)
 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
 					QED_LINK_PAUSE_RX_ENABLE);
-	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
 	params.link_up = true;
@@ -2041,13 +2041,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
 					 QED_LINK_PAUSE_TX_ENABLE))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2088,14 +2088,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
 	*rss_caps = 0;
-	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -2221,7 +2221,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	uint8_t entry;
 	int rc = 0;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
 		       reta_size);
 		return -EINVAL;
@@ -2245,8 +2245,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 
 	for_each_hwfn(edev, i) {
 		for (j = 0; j < reta_size; j++) {
-			idx = j / RTE_RETA_GROUP_SIZE;
-			shift = j % RTE_RETA_GROUP_SIZE;
+			idx = j / RTE_ETH_RETA_GROUP_SIZE;
+			shift = j % RTE_ETH_RETA_GROUP_SIZE;
 			if (reta_conf[idx].mask & (1ULL << shift)) {
 				entry = reta_conf[idx].reta[shift];
 				fid = entry * edev->num_hwfns + i;
@@ -2282,15 +2282,15 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	uint16_t i, idx, shift;
 	uint8_t entry;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported\n",
 		       reta_size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift)) {
 			entry = qdev->rss_ind_table[i];
 			reta_conf[idx].reta[shift] = entry;
@@ -2718,16 +2718,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	adapter->ipgre.num_filters = 0;
 	if (is_vf) {
 		adapter->vxlan.enable = true;
-		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
 		adapter->geneve.enable = true;
-		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					      ETH_TUNNEL_FILTER_IVLAN;
+		adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					      RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
 		adapter->ipgre.enable = true;
-		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 	} else {
 		adapter->vxlan.enable = false;
 		adapter->geneve.enable = false;
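The idx/shift arithmetic over RTE_ETH_RETA_GROUP_SIZE in the RETA update/query paths above is mirrored on the application side. A self-contained sketch (the function name and round-robin policy are ours; assumes reta_size <= RTE_ETH_RSS_RETA_SIZE_512):

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical: spread reta_size entries round-robin over nb_queues. */
static int
setup_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= 1ULL << shift;
		reta[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
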
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index c756594bfc4b..440440423a32 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -20,97 +20,97 @@ const struct _qede_udp_tunn_types {
 	const char *string;
 } qede_tunn_types[] = {
 	{
-		ETH_TUNNEL_FILTER_OMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC,
 		ECORE_FILTER_MAC,
 		ECORE_TUNN_CLSS_MAC_VLAN,
 		"outer-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_VNI,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_VLAN,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"outer-mac and vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VNI,
 		"vni and inner-mac",
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"vni and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_OIP,
+		RTE_ETH_TUNNEL_FILTER_OIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-IP"
 	},
 	{
-		ETH_TUNNEL_FILTER_IIP,
+		RTE_ETH_TUNNEL_FILTER_IIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"inner-IP"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"OMAC_TENID_IMAC"
@@ -144,7 +144,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
 
 	/* check FDIR modes */
 	switch (fdir->mode) {
@@ -542,7 +542,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -570,7 +570,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 					ECORE_TUNN_CLSS_MAC_VLAN, false);
 
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -622,7 +622,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for VXLAN was already configured\n",
@@ -659,7 +659,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
 		qdev->vxlan.udp_port = udp_port;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for GENEVE was already configured\n",
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index c2263787b4ec..d585db8b61e8 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -249,7 +249,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 	/* cache align the mbuf size to simplfy rx_buf_size calculation */
 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)	||
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	||
 	    (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
 		if (!dev->data->scattered_rx) {
 			DP_INFO(edev, "Forcing scatter-gather mode\n");
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index c9334448c887..15112b83f4f7 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -73,14 +73,14 @@
 #define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
 #define QEDE_ETH_MAX_LEN	(RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
 
-#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
-				 ETH_RSS_NONFRAG_IPV4_TCP	|\
-				 ETH_RSS_NONFRAG_IPV4_UDP	|\
-				 ETH_RSS_IPV6			|\
-				 ETH_RSS_NONFRAG_IPV6_TCP	|\
-				 ETH_RSS_NONFRAG_IPV6_UDP	|\
-				 ETH_RSS_VXLAN			|\
-				 ETH_RSS_GENEVE)
+#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4			|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_UDP	|\
+				 RTE_ETH_RSS_IPV6			|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_UDP	|\
+				 RTE_ETH_RSS_VXLAN			|\
+				 RTE_ETH_RSS_GENEVE)
 
 #define QEDE_RXTX_MAX(qdev) \
 	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 0440019e07e1..db10f035dfcb 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -56,10 +56,10 @@ struct pmd_internals {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
@@ -102,7 +102,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -110,21 +110,21 @@ static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	dev->data->dev_started = 0;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_down(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_up(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -163,8 +163,8 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 431c42f508d0..9c1be10ac93d 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -106,13 +106,13 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 {
 	uint32_t phy_caps = 0;
 
-	if (~speeds & ETH_LINK_SPEED_FIXED) {
+	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		phy_caps |= (1 << EFX_PHY_CAP_AN);
 		/*
 		 * If no speeds are specified in the mask, any supported
 		 * may be negotiated
 		 */
-		if (speeds == ETH_LINK_SPEED_AUTONEG)
+		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 			phy_caps |=
 				(1 << EFX_PHY_CAP_1000FDX) |
 				(1 << EFX_PHY_CAP_10000FDX) |
@@ -121,17 +121,17 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 				(1 << EFX_PHY_CAP_50000FDX) |
 				(1 << EFX_PHY_CAP_100000FDX);
 	}
-	if (speeds & ETH_LINK_SPEED_1G)
+	if (speeds & RTE_ETH_LINK_SPEED_1G)
 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
-	if (speeds & ETH_LINK_SPEED_10G)
+	if (speeds & RTE_ETH_LINK_SPEED_10G)
 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
-	if (speeds & ETH_LINK_SPEED_25G)
+	if (speeds & RTE_ETH_LINK_SPEED_25G)
 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
-	if (speeds & ETH_LINK_SPEED_40G)
+	if (speeds & RTE_ETH_LINK_SPEED_40G)
 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
-	if (speeds & ETH_LINK_SPEED_50G)
+	if (speeds & RTE_ETH_LINK_SPEED_50G)
 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
-	if (speeds & ETH_LINK_SPEED_100G)
+	if (speeds & RTE_ETH_LINK_SPEED_100G)
 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
 
 	return phy_caps;
@@ -401,10 +401,10 @@ sfc_set_fw_subvariant(struct sfc_adapter *sa)
 			tx_offloads |= txq_info->offloads;
 	}
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
 	else
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
@@ -899,7 +899,7 @@ sfc_attach(struct sfc_adapter *sa)
 	sa->priv.shared->tunnel_encaps =
 		encp->enc_tunnel_encapsulations_supported;
 
-	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
 			  encp->enc_tso_v3_enabled;
 		if (!sa->tso)
@@ -908,8 +908,8 @@ sfc_attach(struct sfc_adapter *sa)
 
 	if (sa->tso &&
 	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
-	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
+	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
 		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
 				encp->enc_tso_v3_enabled;
 		if (!sa->tso_encap)
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index d958fd642fb1..eeb73a7530ef 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -979,11 +979,11 @@ struct sfc_dp_rx sfc_ef100_rx = {
 				  SFC_DP_RX_FEAT_INTR |
 				  SFC_DP_RX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_RX_OFFLOAD_SCATTER |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_SCATTER |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.get_dev_info		= sfc_ef100_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
 	.qcreate		= sfc_ef100_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index e166fda888b1..67980a587fe4 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -971,16 +971,16 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS |
 				  SFC_DP_TX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_MULTI_SEGS |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 991329e86f01..9ea207cca163 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -746,8 +746,8 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
 				  SFC_DP_RX_FEAT_FLOW_MARK,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.queue_offload_capa	= 0,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
 	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 49a7d4fb42fd..9aaabd30eee6 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -819,10 +819,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ed43adb4ca5c..e7da4608bcb0 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -958,9 +958,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+	if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1125,14 +1125,14 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
@@ -1152,11 +1152,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
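
Queue-level Tx offloads such as the ones listed above are requested through struct rte_eth_txconf at queue setup time. A minimal sketch (hypothetical helper, not from this patch), assuming the renamed flags:

	#include <rte_ethdev.h>

	/* Hypothetical helper: request checksum offloads on one Tx queue,
	 * limited to what the queue-level capability mask allows.
	 */
	static int
	setup_txq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		  const struct rte_eth_dev_info *dev_info)
	{
		struct rte_eth_txconf txconf = dev_info->default_txconf;
		uint64_t wanted = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM;

		txconf.offloads |= wanted & dev_info->tx_queue_offload_capa;

		return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
					      rte_eth_dev_socket_id(port_id),
					      &txconf);
	}
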
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index f5986b610fff..833d833a0408 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -105,19 +105,19 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = sa->sriov.num_vfs;
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->max_rx_queues = sa->rxq_max;
 	dev_info->max_tx_queues = sa->txq_max;
@@ -145,8 +145,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
 				    dev_info->tx_queue_offload_capa;
 
-	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->default_txconf.offloads |= txq_offloads_def;
 
@@ -989,16 +989,16 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	switch (link_fc) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case EFX_FCNTL_RESPOND:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case EFX_FCNTL_GENERATE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	default:
 		sfc_err(sa, "%s: unexpected flow control value %#x",
@@ -1029,16 +1029,16 @@ sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		fcntl = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		fcntl = EFX_FCNTL_RESPOND;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		fcntl = EFX_FCNTL_GENERATE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
 		break;
 	default:
@@ -1313,7 +1313,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
-		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		qinfo->scattered_rx = 1;
 	}
 	qinfo->nb_desc = rxq_info->entries;
@@ -1523,9 +1523,9 @@ static efx_tunnel_protocol_t
 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
 {
 	switch (rte_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		return EFX_TUNNEL_PROTOCOL_VXLAN;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		return EFX_TUNNEL_PROTOCOL_GENEVE;
 	default:
 		return EFX_TUNNEL_NPROTOS;
@@ -1652,7 +1652,7 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	/*
 	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
-	 * hence, conversion is done here to derive a correct set of ETH_RSS
+	 * hence, conversion is done here to derive a correct set of RTE_ETH_RSS
 	 * flags which corresponds to the active EFX configuration stored
 	 * locally in 'sfc_adapter' and kept up-to-date
 	 */
@@ -1778,8 +1778,8 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp = entry / RTE_RETA_GROUP_SIZE;
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 
 		if ((reta_conf[grp].mask >> grp_idx) & 1)
 			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
@@ -1828,10 +1828,10 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 		struct rte_eth_rss_reta_entry64 *grp;
 
-		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+		grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
 
 		if (grp->mask & (1ull << grp_idx)) {
 			if (grp->reta[grp_idx] >= rss->channels) {
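
The RTE_ETH_FC_* values that sfc_flow_ctrl_get()/sfc_flow_ctrl_set() translate to and from EFX_FCNTL_* bits are the same ones an application passes through the generic flow-control API. A minimal application-side sketch (hypothetical helper, not part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical helper: switch a port to full (Rx+Tx pause)
	 * flow control.
	 */
	static int
	enable_full_fc(uint16_t port_id)
	{
		struct rte_eth_fc_conf fc_conf;
		int ret;

		ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
		if (ret != 0)
			return ret;

		fc_conf.mode = RTE_ETH_FC_FULL;
		return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}
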
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 8096af56739f..be2dfe778a0d 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -392,7 +392,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vlan *spec = NULL;
 	const struct rte_flow_item_vlan *mask = NULL;
 	const struct rte_flow_item_vlan supp_mask = {
-		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
 		.inner_type = RTE_BE16(0xffff),
 	};
 
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index 5320d8903dac..27b02b1119fb 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -573,66 +573,66 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
 
 	memset(link_info, 0, sizeof(*link_info));
 	if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
-		link_info->link_status = ETH_LINK_DOWN;
+		link_info->link_status = RTE_ETH_LINK_DOWN;
 	else
-		link_info->link_status = ETH_LINK_UP;
+		link_info->link_status = RTE_ETH_LINK_UP;
 
 	switch (link_mode) {
 	case EFX_LINK_10HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_10FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_100FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_1000HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_1000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_10000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_25000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_25G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_25G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_40000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_40G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_40G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_50000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_50G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_50G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	default:
 		SFC_ASSERT(B_FALSE);
 		/* FALLTHROUGH */
 	case EFX_LINK_UNKNOWN:
 	case EFX_LINK_DOWN:
-		link_info->link_speed  = ETH_SPEED_NUM_NONE;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_NONE;
 		link_info->link_duplex = 0;
 		break;
 	}
 
-	link_info->link_autoneg = ETH_LINK_AUTONEG;
+	link_info->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 int
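
sfc_port_link_mode_to_info() fills the generic struct rte_eth_link fields that every PMD's link_update callback reports. With the renamed constants, a minimal fixed-link version of such a callback could look like this (a sketch, not part of this patch):

	#include <string.h>
	#include <ethdev_driver.h>

	/* Hypothetical PMD callback: report a fixed 10G full-duplex link. */
	static int
	example_link_update(struct rte_eth_dev *dev,
			    int wait_to_complete __rte_unused)
	{
		struct rte_eth_link link;

		memset(&link, 0, sizeof(link));
		link.link_status  = RTE_ETH_LINK_UP;
		link.link_speed   = RTE_ETH_SPEED_NUM_10G;
		link.link_duplex  = RTE_ETH_LINK_FULL_DUPLEX;
		link.link_autoneg = RTE_ETH_LINK_FIXED;

		return rte_eth_linkstatus_set(dev, &link);
	}
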
diff --git a/drivers/net/sfc/sfc_repr.c b/drivers/net/sfc/sfc_repr.c
index 2500b14cb006..9d88d554c1ba 100644
--- a/drivers/net/sfc/sfc_repr.c
+++ b/drivers/net/sfc/sfc_repr.c
@@ -405,7 +405,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 	}
 
 	switch (conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (nb_rx_queues != 1) {
 			sfcr_err(sr, "Rx RSS is not supported with %u queues",
 				 nb_rx_queues);
@@ -420,7 +420,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 			ret = -EINVAL;
 		}
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		break;
 	default:
 		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
@@ -428,7 +428,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 		break;
 	}
 
-	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
+	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
 		sfcr_err(sr, "Tx mode MQ modes not supported");
 		ret = -EINVAL;
 	}
@@ -553,8 +553,8 @@ sfc_repr_dev_link_update(struct rte_eth_dev *dev,
 		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
 	} else {
 		memset(&link, 0, sizeof(link));
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c60ef17a922a..23df27c8f45a 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -648,9 +648,9 @@ struct sfc_dp_rx sfc_efx_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
 	},
 	.features		= SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
 	.qcreate		= sfc_efx_rx_qcreate,
 	.qdestroy		= sfc_efx_rx_qdestroy,
@@ -931,7 +931,7 @@ sfc_rx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (encp->enc_tunnel_encapsulations_supported == 0)
-		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	return ~no_caps;
 }
@@ -1140,7 +1140,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 
 	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
 				  encp->enc_rx_prefix_size,
-				  (offloads & DEV_RX_OFFLOAD_SCATTER),
+				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
 				  encp->enc_rx_scatter_max,
 				  &error)) {
 		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
@@ -1166,15 +1166,15 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags |=
-		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
 	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
-	     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
 
-	if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
 
 	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
@@ -1211,7 +1211,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	rxq_info->refill_mb_pool = mb_pool;
 
 	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
-	    (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
 	else
 		rxq_info->rxq_flags = 0;
@@ -1313,19 +1313,19 @@ sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
  * Mapping between RTE RSS hash functions and their EFX counterparts.
  */
 static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
-	{ ETH_RSS_NONFRAG_IPV4_TCP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
 	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
 	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
-	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV4, 2TUPLE) },
-	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
-	  ETH_RSS_IPV6_EX,
+	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+	  RTE_ETH_RSS_IPV6_EX,
 	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV6, 2TUPLE) }
 };
@@ -1645,10 +1645,10 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	int rc = 0;
 
 	switch (rxmode->mq_mode) {
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/* No special checks are required */
 		break;
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
 			sfc_err(sa, "RSS is not available");
 			rc = EINVAL;
@@ -1665,16 +1665,16 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	 * so unsupported offloads cannot be added as the result of
 	 * below check.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
-	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 	}
 
 	return rc;
@@ -1820,7 +1820,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	}
 
 configure_rss:
-	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
 			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
 	if (rss->channels > 0) {
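
The sfc_rss_hf_map[] table above consumes the same RTE_ETH_RSS_* bits that an application sets in rss_conf when it selects RTE_ETH_MQ_RX_RSS. For reference, a hypothetical port configuration using the renamed identifiers (not part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical configuration: RSS over IPv4/IPv6 TCP and UDP. */
	static const struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,	/* PMD default key */
				.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
					  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
					  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
					  RTE_ETH_RSS_NONFRAG_IPV6_UDP,
			},
		},
		.txmode = {
			.mq_mode = RTE_ETH_MQ_TX_NONE,
		},
	};

Passing this to rte_eth_dev_configure() fails if rss_hf requests hash types outside dev_info.flow_type_rss_offloads, so rss_hf is usually masked against that capability first.
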
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 13392cdd5a09..0273788c20ce 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -54,23 +54,23 @@ sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (!encp->enc_hw_tx_insert_vlan_enabled)
-		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (!encp->enc_tunnel_encapsulations_supported)
-		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	if (!sa->tso)
-		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	return ~no_caps;
 }
@@ -114,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -309,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;
 
 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -529,23 +529,23 @@ sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 	if (rc != 0)
 		goto fail_ev_qstart;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_IPV4;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -876,9 +876,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/*
 		 * Here VLAN TCI is expected to be zero in case if no
-		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
@@ -1242,13 +1242,13 @@ struct sfc_dp_tx sfc_efx_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
 	},
 	.features		= 0,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO,
 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b3b55b9035b1..3ef33818a9e0 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -173,7 +173,7 @@ pmd_dev_start(struct rte_eth_dev *dev)
 		return status;
 
 	/* Link UP */
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -184,7 +184,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
 	struct pmd_internals *p = dev->data->dev_private;
 
 	/* Link DOWN */
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	/* Firmware */
 	softnic_pipeline_disable_all(p);
@@ -386,10 +386,10 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
 
 	/* dev->data */
 	dev->data->dev_private = dev_private;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	dev->data->mac_addrs = &eth_addr;
 	dev->data->promiscuous = 1;
 	dev->data->numa_node = params->cpu_id;
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 3c6a285e3c5e..6a084e3e1b1b 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1042,7 +1042,7 @@ static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_dev_data *data = dev->data;
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
 		data->scattered_rx = 1;
 	} else {
@@ -1064,11 +1064,11 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = internals->max_rx_queues;
 	dev_info->max_tx_queues = internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = 0;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->tx_queue_offload_capa = 0;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1202,10 +1202,10 @@ eth_link_update(struct rte_eth_dev *dev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_speed = ETH_SPEED_NUM_100G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_status = ETH_LINK_UP;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	rte_eth_linkstatus_set(dev, &link);
 	return 0;
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index e4f1ad45219e..5d5350d78e03 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -70,16 +70,16 @@
 
 #define TAP_IOV_DEFAULT_MAX 1024
 
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER |	\
-			DEV_RX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_RX_OFFLOAD_UDP_CKSUM |	\
-			DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER |	\
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS |	\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_TX_OFFLOAD_UDP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |	\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 static int tap_devices_count;
 
@@ -97,10 +97,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;	/* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -433,7 +433,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		len = readv(process_private->rxq_fds[rxq->queue_id],
 			*rxq->iovecs,
-			1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+			1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
 			     rxq->nb_rx_desc : 1));
 		if (len < (int)sizeof(struct tun_pi))
 			break;
@@ -489,7 +489,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		seg->next = NULL;
 		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
 						      RTE_PTYPE_ALL_MASK);
-		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 			tap_verify_csum(mbuf);
 
 		/* account for the receive frame */
@@ -866,7 +866,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -876,7 +876,7 @@ tap_link_set_up(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
@@ -956,30 +956,30 @@ tap_dev_speed_capa(void)
 	uint32_t speed = pmd_link.link_speed;
 	uint32_t capa = 0;
 
-	if (speed >= ETH_SPEED_NUM_10M)
-		capa |= ETH_LINK_SPEED_10M;
-	if (speed >= ETH_SPEED_NUM_100M)
-		capa |= ETH_LINK_SPEED_100M;
-	if (speed >= ETH_SPEED_NUM_1G)
-		capa |= ETH_LINK_SPEED_1G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_2_5G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_5G;
-	if (speed >= ETH_SPEED_NUM_10G)
-		capa |= ETH_LINK_SPEED_10G;
-	if (speed >= ETH_SPEED_NUM_20G)
-		capa |= ETH_LINK_SPEED_20G;
-	if (speed >= ETH_SPEED_NUM_25G)
-		capa |= ETH_LINK_SPEED_25G;
-	if (speed >= ETH_SPEED_NUM_40G)
-		capa |= ETH_LINK_SPEED_40G;
-	if (speed >= ETH_SPEED_NUM_50G)
-		capa |= ETH_LINK_SPEED_50G;
-	if (speed >= ETH_SPEED_NUM_56G)
-		capa |= ETH_LINK_SPEED_56G;
-	if (speed >= ETH_SPEED_NUM_100G)
-		capa |= ETH_LINK_SPEED_100G;
+	if (speed >= RTE_ETH_SPEED_NUM_10M)
+		capa |= RTE_ETH_LINK_SPEED_10M;
+	if (speed >= RTE_ETH_SPEED_NUM_100M)
+		capa |= RTE_ETH_LINK_SPEED_100M;
+	if (speed >= RTE_ETH_SPEED_NUM_1G)
+		capa |= RTE_ETH_LINK_SPEED_1G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_2_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_10G)
+		capa |= RTE_ETH_LINK_SPEED_10G;
+	if (speed >= RTE_ETH_SPEED_NUM_20G)
+		capa |= RTE_ETH_LINK_SPEED_20G;
+	if (speed >= RTE_ETH_SPEED_NUM_25G)
+		capa |= RTE_ETH_LINK_SPEED_25G;
+	if (speed >= RTE_ETH_SPEED_NUM_40G)
+		capa |= RTE_ETH_LINK_SPEED_40G;
+	if (speed >= RTE_ETH_SPEED_NUM_50G)
+		capa |= RTE_ETH_LINK_SPEED_50G;
+	if (speed >= RTE_ETH_SPEED_NUM_56G)
+		capa |= RTE_ETH_LINK_SPEED_56G;
+	if (speed >= RTE_ETH_SPEED_NUM_100G)
+		capa |= RTE_ETH_LINK_SPEED_100G;
 
 	return capa;
 }
@@ -1196,15 +1196,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
 		if (!(ifr.ifr_flags & IFF_UP) ||
 		    !(ifr.ifr_flags & IFF_RUNNING)) {
-			dev_link->link_status = ETH_LINK_DOWN;
+			dev_link->link_status = RTE_ETH_LINK_DOWN;
 			return 0;
 		}
 	}
 	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
 	dev_link->link_status =
 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-		 ETH_LINK_UP :
-		 ETH_LINK_DOWN);
+		 RTE_ETH_LINK_UP :
+		 RTE_ETH_LINK_DOWN);
 	return 0;
 }
 
@@ -1391,7 +1391,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 	int ret;
 
 	/* initialize GSO context */
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (!pmd->gso_ctx_mp) {
 		/*
 		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1606,9 +1606,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->csum = !!(offloads &
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM));
+			(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
@@ -1760,7 +1760,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	fc_conf->mode = RTE_FC_NONE;
+	fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1768,7 +1768,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	if (fc_conf->mode != RTE_FC_NONE)
+	if (fc_conf->mode != RTE_ETH_FC_NONE)
 		return -ENOTSUP;
 	return 0;
 }
@@ -2262,7 +2262,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
 			}
 		}
 	}
-	pmd_link.link_speed = ETH_SPEED_NUM_10G;
+	pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
 	TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2436,7 +2436,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 		return 0;
 	}
 
-	speed = ETH_SPEED_NUM_10G;
+	speed = RTE_ETH_SPEED_NUM_10G;
 
 	/* use tap%d which causes kernel to choose next available */
 	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
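
tap_dev_speed_capa() accumulates RTE_ETH_LINK_SPEED_* bits for every speed up to the configured one. When only the flag for a single numeric speed is needed, ethdev already provides rte_eth_speed_bitflag(); a short sketch using the renamed constants (hypothetical wrapper, not part of this patch):

	#include <rte_ethdev.h>

	/* Hypothetical wrapper: map one numeric link speed to its
	 * RTE_ETH_LINK_SPEED_* capability bit.
	 */
	static uint32_t
	one_speed_capa(uint32_t speed_num)
	{
		return rte_eth_speed_bitflag(speed_num,
					     RTE_ETH_LINK_FULL_DUPLEX);
	}

For example, one_speed_capa(RTE_ETH_SPEED_NUM_10G) yields RTE_ETH_LINK_SPEED_10G.
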
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 176e7180bdaa..48c151cf6b68 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -13,7 +13,7 @@
 #define TAP_RSS_HASH_KEY_SIZE 40
 
 /* Supported RSS */
-#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define TAP_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))
 
 /* hashed fields for RSS */
 enum hash_field {
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 328d6d56d921..38a2ddc633b5 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -61,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	if (nic->duplex == NICVF_HALF_DUPLEX)
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -134,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
 			nicvf_link_status_update(nic, &link);
-			if (link.link_status == ETH_LINK_UP)
+			if (link.link_status == RTE_ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
@@ -390,35 +390,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
 	uint64_t nic_rss = 0;
 
-	if (ethdev_rss & ETH_RSS_IPV4)
+	if (ethdev_rss & RTE_ETH_RSS_IPV4)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_IPV6)
+	if (ethdev_rss & RTE_ETH_RSS_IPV6)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-		if (ethdev_rss & ETH_RSS_VXLAN)
+		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 			nic_rss |= RSS_TUN_VXLAN_ENA;
 
-		if (ethdev_rss & ETH_RSS_GENEVE)
+		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 			nic_rss |= RSS_TUN_GENEVE_ENA;
 
-		if (ethdev_rss & ETH_RSS_NVGRE)
+		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 			nic_rss |= RSS_TUN_NVGRE_ENA;
 	}
 
@@ -431,28 +431,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
 	uint64_t ethdev_rss = 0;
 
 	if (nic_rss & RSS_IP_ENA)
-		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-				ETH_RSS_NONFRAG_IPV6_TCP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-				ETH_RSS_NONFRAG_IPV6_UDP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-		ethdev_rss |= ETH_RSS_PORT;
+		ethdev_rss |= RTE_ETH_RSS_PORT;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
 		if (nic_rss & RSS_TUN_VXLAN_ENA)
-			ethdev_rss |= ETH_RSS_VXLAN;
+			ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
 		if (nic_rss & RSS_TUN_GENEVE_ENA)
-			ethdev_rss |= ETH_RSS_GENEVE;
+			ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
 		if (nic_rss & RSS_TUN_NVGRE_ENA)
-			ethdev_rss |= ETH_RSS_NVGRE;
+			ethdev_rss |= RTE_ETH_RSS_NVGRE;
 	}
 	return ethdev_rss;
 }
@@ -479,8 +479,8 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = tbl[j];
 	}
@@ -509,8 +509,8 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				tbl[j] = reta_conf[i].reta[j];
 	}
@@ -807,9 +807,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
 		    dev->data->nb_rx_queues,
 		    dev->data->dev_conf.lpbk_mode, rsshf);
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		ret = nicvf_rss_term(nic);
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
 	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -870,7 +870,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -992,7 +992,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->offloads = offloads;
 
-	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1382,11 +1382,11 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+				 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
@@ -1415,10 +1415,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
-			DEV_TX_OFFLOAD_UDP_CKSUM          |
-			DEV_TX_OFFLOAD_TCP_CKSUM,
+		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM          |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 	};
 
 	return 0;
@@ -1582,8 +1582,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1711,7 +1711,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	/* Setup scatter mode if needed by jumbo */
 	if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU */
@@ -1896,8 +1896,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!rte_eal_has_hugepages()) {
 		PMD_INIT_LOG(INFO, "Huge page is not configured");
@@ -1909,8 +1909,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
@@ -1920,7 +1920,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -1955,7 +1955,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic->offload_cksum = 1;
 
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -2032,8 +2032,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			nicvf_vlan_hw_strip(nic, true);
 		else
 			nicvf_vlan_hw_strip(nic, false);
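
The RETA copy loops above show the indexing convention behind RTE_ETH_RETA_GROUP_SIZE: entry i lives in group i / RTE_ETH_RETA_GROUP_SIZE at slot i % RTE_ETH_RETA_GROUP_SIZE, guarded by that group's mask bit. A minimal application-side sketch (hypothetical helper, assuming reta_size <= 512; not part of this patch):

	#include <stdint.h>
	#include <string.h>
	#include <rte_ethdev.h>

	/* Hypothetical helper: spread 'reta_size' RETA entries
	 * round-robin over 'nb_queues' Rx queues.
	 */
	static int
	spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
	{
		struct rte_eth_rss_reta_entry64
			reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
				  RTE_ETH_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta_conf, 0, sizeof(reta_conf));
		for (i = 0; i < reta_size; i++) {
			uint16_t grp = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t idx = i % RTE_ETH_RETA_GROUP_SIZE;

			reta_conf[grp].mask |= UINT64_C(1) << idx;
			reta_conf[grp].reta[idx] = i % nb_queues;
		}

		return rte_eth_dev_rss_reta_update(port_id, reta_conf,
						   reta_size);
	}
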
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 5d38750d6313..cb474e26b81e 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -16,32 +16,32 @@
 #define NICVF_UNKNOWN_DUPLEX		0xff
 
 #define NICVF_RSS_OFFLOAD_PASS1 ( \
-	ETH_RSS_PORT | \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_PORT | \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define NICVF_RSS_OFFLOAD_TUNNEL ( \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
 
 #define NICVF_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
-	DEV_TX_OFFLOAD_UDP_CKSUM        | \
-	DEV_TX_OFFLOAD_TCP_CKSUM        | \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define NICVF_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM    | \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM    | \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
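
The RTE_ETH_VLAN_*_MASK bits handled by nicvf_vlan_offload_config() correspond to the bits applications drive through rte_eth_dev_set_vlan_offload(). A minimal sketch (hypothetical helper; it assumes the matching RTE_ETH_VLAN_STRIP_OFFLOAD alias, which is outside the hunks shown here):

	#include <rte_ethdev.h>

	/* Hypothetical helper: toggle VLAN stripping at runtime. */
	static int
	set_vlan_strip(uint16_t port_id, int on)
	{
		int mask = rte_eth_dev_get_vlan_offload(port_id);

		if (mask < 0)
			return mask;

		if (on)
			mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		else
			mask &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;

		return rte_eth_dev_set_vlan_offload(port_id, mask);
	}
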
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 7b46ffb68635..0b0f9db7cb2a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -998,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 			!(rxcfg & TXGBE_RXCFG_VLAN);
 		rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1033,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (vlan_ext) {
 			wr32m(hw, TXGBE_VLANCTL,
 				TXGBE_VLANCTL_TPID_MASK,
@@ -1053,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				TXGBE_TAGTPID_LSB(tpid));
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (vlan_ext) {
 			/* Only the high 16-bits is valid */
 			wr32m(hw, TXGBE_EXTAG,
@@ -1138,10 +1138,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -1240,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			txgbe_vlan_strip_queue_set(dev, i, 1);
 		else
 			txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1254,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct txgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -1275,25 +1275,25 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		txgbe_vlan_hw_strip_config(dev);
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			txgbe_vlan_hw_filter_enable(dev);
 		else
 			txgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			txgbe_vlan_hw_extend_enable(dev);
 		else
 			txgbe_vlan_hw_extend_disable(dev);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			txgbe_qinq_hw_strip_enable(dev);
 		else
 			txgbe_qinq_hw_strip_disable(dev);
@@ -1331,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -1357,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1378,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode =
-				ETH_MQ_RX_VMDQ_ONLY;
+				RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -1393,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
 			dev->data->dev_conf.txmode.mq_mode =
-				ETH_MQ_TX_VMDQ_ONLY;
+				RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -1414,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdb+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1429,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1446,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -1495,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = txgbe_check_mq_mode(dev);
@@ -1694,15 +1694,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		txgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1763,8 +1763,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	if (err)
 		goto error;
 
-	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
@@ -1773,20 +1773,20 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
 	}
 
@@ -2601,7 +2601,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2634,11 +2634,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_desc_lim = tx_desc_lim;
 
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -2713,8 +2713,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -2733,34 +2733,34 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	}
 
 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case TXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case TXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -2990,7 +2990,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3221,13 +3221,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3359,16 +3359,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3400,16 +3400,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3576,12 +3576,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
 		}
@@ -3605,15 +3605,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= TXGBE_POOLETHCTL_UTA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= TXGBE_POOLETHCTL_MCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= TXGBE_POOLETHCTL_UCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= TXGBE_POOLETHCTL_BCA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= TXGBE_POOLETHCTL_MCP;
 
 	return new_val;
@@ -4264,15 +4264,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = TXGBE_INCVAL_100;
 		shift = TXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = TXGBE_INCVAL_1GB;
 		shift = TXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = TXGBE_INCVAL_10GB;
 		shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4628,7 +4628,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -4639,7 +4639,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -4663,9 +4663,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4678,7 +4678,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4908,7 +4908,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -4939,7 +4939,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -4979,7 +4979,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4987,7 +4987,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4995,7 +4995,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5003,7 +5003,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5035,7 +5035,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5045,7 +5045,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5055,7 +5055,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5065,7 +5065,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
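
A caller-side sketch with the renamed tunnel-type enum, for reference
only (port_id, ret and the UDP port value are illustrative, not part of
this patch):

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,	/* IANA VXLAN port, example value */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	/* 0 on success, negative errno otherwise */
	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
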
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index fd65d89ffe7d..8304b68292da 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -60,15 +60,15 @@
 #define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
 #define TXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
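
With the renamed RTE_ETH_RSS_* bits, a minimal application-side sketch
of updating the RSS hash configuration (port_id and ret are
illustrative):

	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* NULL keeps the current hash key */
		.rss_hf = RTE_ETH_RSS_IPV4 |
			  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	};

	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
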
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 43dc0ed39b75..283b52e8f3db 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -486,14 +486,14 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -574,22 +574,22 @@ txgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -647,8 +647,8 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
 	txgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -891,10 +891,10 @@ txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			txgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
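
A hedged sketch of the matching application call, assuming the
RTE_ETH_VLAN_*_OFFLOAD flags are renamed alongside the masks above
(port_id and ret are illustrative):

	int vlan_mask = rte_eth_dev_get_vlan_offload(port_id);

	if (vlan_mask >= 0) {
		vlan_mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
		ret = rte_eth_dev_set_vlan_offload(port_id, vlan_mask);
	}
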
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 8abb86228608..e303d87176ed 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -102,22 +102,22 @@ txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf,
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
 		     uint32_t *fdirctrl, uint32_t *flex)
 {
 	*fdirctrl = 0;
 	*flex = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
 		break;
@@ -521,15 +521,15 @@ txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
@@ -564,15 +564,15 @@ txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_signature_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
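
A minimal sketch of selecting the renamed pballoc value from the
application side (conf, port_id, nb_rxq and nb_txq are illustrative):

	struct rte_eth_conf conf = { 0 };

	/* 64K buffer allocation gives 8k - 1 signature filters */
	conf.fdir_conf.pballoc = RTE_ETH_FDIR_PBALLOC_64K;

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
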
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index eae400b14176..6d7fd1842843 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1215,7 +1215,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index ccd747973ba2..445733f3ba46 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -372,7 +372,7 @@ txgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -380,7 +380,7 @@ txgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -611,11 +611,11 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -634,7 +634,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= TXGBE_SECRXCTL_CRCSTRIP;
 	wr32(hw, TXGBE_SECRXCTL, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
 		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
 		if (reg != 0) {
@@ -642,7 +642,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
 		reg = rd32(hw, TXGBE_SECTXCTL);
 		if (reg != TXGBE_SECTXCTL_STFWD) {
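
A sketch of how an application would request the renamed security
offloads after probing capabilities with rte_eth_dev_info_get()
(dev_info, conf and ret are illustrative):

	if ((dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY) &&
	    (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SECURITY)) {
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
	}
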
diff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c
index a48972b1a381..30be2873307a 100644
--- a/drivers/net/txgbe/txgbe_pf.c
+++ b/drivers/net/txgbe/txgbe_pf.c
@@ -101,15 +101,15 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct txgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -256,13 +256,13 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	}
@@ -611,29 +611,29 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &eth_dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = TXGBE_DEV_HW(eth_dev);
 		vmvir = rd32(hw, TXGBE_POOLTAG(vf));
 		vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 7e18dcce0a86..1204dc5499a5 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1960,7 +1960,7 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
-	return DEV_RX_OFFLOAD_VLAN_STRIP;
+	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 }
 
 uint64_t
@@ -1970,34 +1970,34 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_RSS_HASH |
-		   DEV_RX_OFFLOAD_SCATTER;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		   RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	if (!txgbe_is_vf(dev))
-		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-			     DEV_RX_OFFLOAD_QINQ_STRIP |
-			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+		offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 
 	/*
 	 * RSC is only supported by PF devices in a non-SR-IOV
 	 * mode.
 	 */
 	if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == txgbe_mac_raptor)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
-	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -2222,32 +2222,32 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_UDP_TSO	   |
-		DEV_TX_OFFLOAD_UDP_TNL_TSO	|
-		DEV_TX_OFFLOAD_IP_TNL_TSO	|
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO	|
-		DEV_TX_OFFLOAD_GRE_TNL_TSO	|
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO	|
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO	|
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_UDP_TSO	   |
+		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (!txgbe_is_vf(dev))
-		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2349,7 +2349,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/* Modification to set tail pointer for virtual function
@@ -2599,7 +2599,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2900,20 +2900,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2930,20 +2930,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		mrqc &= ~TXGBE_RACTL_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_RACTL_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_RACTL_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_RACTL_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2984,39 +2984,39 @@ txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
 			rss_hf = 0;
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		if (mrqc & TXGBE_RACTL_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_RACTL_RSSENA))
 			rss_hf = 0;
 	}
@@ -3046,7 +3046,7 @@ txgbe_rss_configure(struct rte_eth_dev *dev)
 	 */
 	if (adapter->rss_reta_updated == 0) {
 		reta = 0;
-		for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 			if (j == dev->data->nb_rx_queues)
 				j = 0;
 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
@@ -3083,12 +3083,12 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		txgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * split rx buffer up into sections, each for 1 traffic class
@@ -3103,7 +3103,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
 
 		rxpbsize &= (~(0x3FF << 10));
@@ -3111,7 +3111,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 
-	if (num_pools == ETH_16_POOLS) {
+	if (num_pools == RTE_ETH_16_POOLS) {
 		mrqc = TXGBE_PORTCTL_NUMTC_8;
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 	} else {
@@ -3130,7 +3130,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	wr32(hw, TXGBE_POOLCTL, vt_ctl);
 
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3151,7 +3151,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_POOLRXENA(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_ETHADDRIDX, 0);
 	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
@@ -3221,7 +3221,7 @@ txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	/*PF VF Transmit Enable*/
 	wr32(hw, TXGBE_POOLTXENA(0),
 		vmdq_tx_conf->nb_queue_pools ==
-				ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+				RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	txgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3237,12 +3237,12 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3252,7 +3252,7 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3270,12 +3270,12 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3285,7 +3285,7 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3312,7 +3312,7 @@ txgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3339,7 +3339,7 @@ txgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3475,7 +3475,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/*
@@ -3486,8 +3486,8 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/*Configure general VMDQ and DCB RX parameters*/
 		txgbe_vmdq_dcb_configure(dev);
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -3500,7 +3500,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -3511,7 +3511,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -3527,15 +3527,15 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
-			if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -3576,7 +3576,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			wr32(hw, TXGBE_PBRXSIZE(i), 0);
 	}
 	if (config_dcb_tx) {
@@ -3592,7 +3592,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			wr32(hw, TXGBE_PBTXSIZE(i), 0);
 			wr32(hw, TXGBE_PBTXDMATH(i), 0);
 		}
@@ -3634,7 +3634,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/* If the TC count is 8,
@@ -3648,7 +3648,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = txgbe_dcb_pfc_enabled;
 		}
 		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -3719,12 +3719,12 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -3780,7 +3780,7 @@ txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* pool enabling for receive - 64 */
 	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
 
 	/*
@@ -3904,11 +3904,11 @@ txgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
@@ -3931,15 +3931,15 @@ txgbe_config_vf_default(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	default:
@@ -3962,21 +3962,21 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			txgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			txgbe_rss_disable(dev);
@@ -3987,18 +3987,18 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4028,7 +4028,7 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			txgbe_vmdq_tx_hw_configure(hw);
 		else
 			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
@@ -4038,13 +4038,13 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_64;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_32;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_16;
 			break;
 		default:
@@ -4107,10 +4107,10 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4118,22 +4118,22 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
 				    "is disabled");
 		return -EINVAL;
 	}
 
 	rfctl = rd32(hw, TXGBE_PSRCTL);
-	if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~TXGBE_PSRCTL_RSCDIA;
 	else
 		rfctl |= TXGBE_PSRCTL_RSCDIA;
 	wr32(hw, TXGBE_PSRCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set PSRCTL.RSCACK bit */
@@ -4273,7 +4273,7 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 	}
 #endif
 }
@@ -4316,7 +4316,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
 	else
 		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4344,7 +4344,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4354,7 +4354,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -4391,11 +4391,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4410,7 +4410,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = rd32(hw, TXGBE_PSRCTL);
 	rxcsum |= TXGBE_PSRCTL_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= TXGBE_PSRCTL_L4CSUM;
 	else
 		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
@@ -4419,7 +4419,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	if (hw->mac.type == txgbe_mac_raptor) {
 		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4542,8 +4542,8 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 		txgbe_setup_loopback_link_raptor(hw);
 
 #ifdef RTE_LIB_SECURITY
-	if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
-	    (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
+	    (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = txgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -4851,7 +4851,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Set PSR type for VF RSS according to max Rx queue */
 	psrtype = TXGBE_VFPLCFG_PSRL4HDR |
@@ -4903,7 +4903,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		 */
 		wr32(hw, TXGBE_RXCFG(i), srrctl);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -4912,8 +4912,8 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/*
@@ -5084,7 +5084,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 		if (j == conf->conf.queue_num)
 			j = 0;
 		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
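
A minimal sketch of filling the 128-entry indirection table with the
renamed size and group-size macros (port_id, ret and nb_rx_queues are
illustrative):

	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
					RTE_ETH_RETA_GROUP_SIZE] = { 0 };
	uint16_t i;

	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		/* spread entries over the Rx queues round-robin */
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
					RTE_ETH_RSS_RETA_SIZE_128);
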
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b96f58a3f848..27d4c842c0e7 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -309,7 +309,7 @@ struct txgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -392,7 +392,7 @@ struct txgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t            offloads; /* Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 3abe3959eb1a..3171be73d05d 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -118,14 +118,14 @@ txgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -364,10 +364,10 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -381,7 +381,7 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
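
A sketch of the DCB Tx setup an application would pass in, using the
renamed TC and mq_mode names (conf and i are illustrative):

	conf.txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
	conf.tx_adv_conf.dcb_tx_conf.nb_tcs = RTE_ETH_4_TCS;
	/* map the 8 user priorities onto the 4 traffic classes */
	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
		conf.tx_adv_conf.dcb_tx_conf.dcb_tc[i] = i % RTE_ETH_4_TCS;
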
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a7935a716de9..27f81a5cafc5 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -125,8 +125,8 @@ static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static struct rte_eth_link pmd_link = {
 		.link_speed = 10000,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN
 };
 
 struct rte_vhost_vring_state {
@@ -823,7 +823,7 @@ new_device(int vid)
 
 	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);
@@ -858,7 +858,7 @@ destroy_device(int vid)
 	rte_atomic32_set(&internal->dev_attached, 0);
 	update_queuing_status(eth_dev);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1124,7 +1124,7 @@ eth_dev_configure(struct rte_eth_dev *dev)
 	if (vhost_driver_setup(dev) < 0)
 		return -1;
 
-	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -1273,9 +1273,9 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = internal->max_queues;
 	dev_info->min_rx_bufsize = 0;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
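
Reading link state back with the renamed constants, as a minimal sketch
(port_id and ret are illustrative):

	struct rte_eth_link link;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret == 0 && link.link_status == RTE_ETH_LINK_UP)
		printf("Port %u: %u Mbps %s-duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
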
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 047d3f43a3cf..74ede2aeccc1 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -712,7 +712,7 @@ int
 virtio_dev_close(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1771,7 +1771,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
-	if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+	if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
 		if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
 			config = &local_config;
 			virtio_read_dev_config(hw,
@@ -1785,7 +1785,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		}
 	}
 	if (hw->duplex == DUPLEX_UNKNOWN)
-		hw->duplex = ETH_LINK_FULL_DUPLEX;
+		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
 		hw->speed, hw->duplex);
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
@@ -1884,7 +1884,7 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	int vectorized = 0;
 	int ret;
 
@@ -1955,22 +1955,22 @@ static uint32_t
 virtio_dev_speed_capa_get(uint32_t speed)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
@@ -2086,14 +2086,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "configure");
 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Rx multi queue mode %d",
 			rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Tx multi queue mode %d",
 			txmode->mq_mode);
@@ -2111,20 +2111,20 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
@@ -2136,15 +2136,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
-	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
 		PMD_DRV_LOG(ERR,
 			"rx checksum not available on this host");
 		return -ENOTSUP;
 	}
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
 		PMD_DRV_LOG(ERR,
@@ -2156,12 +2156,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
 		virtio_dev_cq_start(dev);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		hw->vlan_strip = 1;
 
-	hw->rx_ol_scatter = (rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 		PMD_DRV_LOG(ERR,
 			    "vlan filtering not available on this host");
@@ -2214,7 +2214,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 				PMD_DRV_LOG(INFO,
 					"disabled packed ring vectorized rx for TCP_LRO enabled");
 				hw->use_vec_rx = 0;
@@ -2241,10 +2241,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_LRO |
-					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
+					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
 				PMD_DRV_LOG(INFO,
 					"disabled split ring vectorized rx for offloading enabled");
 				hw->use_vec_rx = 0;
@@ -2437,7 +2437,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct rte_eth_link link;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 	dev->data->dev_started = 0;
@@ -2478,28 +2478,28 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 	memset(&link, 0, sizeof(link));
 	link.link_duplex = hw->duplex;
 	link.link_speed  = hw->speed;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (!hw->started) {
-		link.link_status = ETH_LINK_DOWN;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
 		virtio_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
 				&status, sizeof(status));
 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
-			link.link_status = ETH_LINK_DOWN;
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			PMD_INIT_LOG(DEBUG, "Port %d is down",
 				     dev->data->port_id);
 		} else {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			PMD_INIT_LOG(DEBUG, "Port %d is up",
 				     dev->data->port_id);
 		}
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2512,8 +2512,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct virtio_hw *hw = dev->data->dev_private;
 	uint64_t offloads = rxmode->offloads;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 
 			PMD_DRV_LOG(NOTICE,
@@ -2523,8 +2523,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK)
-		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
+		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -2546,32 +2546,32 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = hw->max_mtu;
 
 	host_features = VIRTIO_OPS(hw)->get_features(hw);
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 	}
 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 	}
 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
 		/*
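
virtio_dev_speed_capa_get() above maps one RTE_ETH_SPEED_NUM_* value to its
RTE_ETH_LINK_SPEED_* capability bit. Applications pass the same renamed bits
in rte_eth_conf.link_speeds; a minimal sketch (illustrative only, assuming a
port that supports a fixed 10G link):

#include <rte_ethdev.h>

/* Request a fixed 10G link instead of autonegotiation; these are the
 * former ETH_LINK_SPEED_* flags under the RTE_ETH_ prefix.
 */
static void
request_fixed_10g(struct rte_eth_conf *conf)
{
	conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			    RTE_ETH_LINK_SPEED_10G;
}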
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index a19895af1f17..26d9edf5319c 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -41,20 +41,20 @@
 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
 
 #define VMXNET3_TX_OFFLOAD_CAP		\
-	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
-	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_TX_OFFLOAD_TCP_TSO |	\
-	 DEV_TX_OFFLOAD_MULTI_SEGS)
+	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
+	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define VMXNET3_RX_OFFLOAD_CAP		\
-	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
-	 DEV_RX_OFFLOAD_VLAN_FILTER |   \
-	 DEV_RX_OFFLOAD_SCATTER |	\
-	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_LRO |	\
-	 DEV_RX_OFFLOAD_RSS_HASH)
+	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |	\
+	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
+	 RTE_ETH_RX_OFFLOAD_SCATTER |	\
+	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_LRO |	\
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 int vmxnet3_segs_dynfield_offset = -1;
 
@@ -398,9 +398,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* set the initial link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(eth_dev, &link);
 
 	return 0;
@@ -486,8 +486,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
@@ -547,7 +547,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	hw->queueDescPA = mz->iova;
 	hw->queue_desc_len = (uint16_t)size;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Allocate memory structure for UPT1_RSSConf and configure */
 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
 				      "rss_conf", rte_socket_id(),
@@ -843,15 +843,15 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
 		devRead->misc.maxNumRxSG = 0;
 	}
 
-	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		ret = vmxnet3_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS)
 			return ret;
@@ -863,7 +863,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	}
 
 	ret = vmxnet3_dev_vlan_offload_set(dev,
-			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -930,7 +930,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 	}
 
 	if (VMXNET3_VERSION_GE_4(hw) &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Check for additional RSS  */
 		ret = vmxnet3_v4_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS) {
@@ -1039,9 +1039,9 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(dev, &link);
 
 	hw->adapter_stopped = 1;
@@ -1365,7 +1365,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
 	dev_info->min_mtu = VMXNET3_MIN_MTU;
 	dev_info->max_mtu = VMXNET3_MAX_MTU;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -1447,10 +1447,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (ret & 0x1)
-		link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+		link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1503,7 +1503,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1573,8 +1573,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1583,8 +1583,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 				       VMXNET3_CMD_UPDATE_FEATURE);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 8950175460f0..ef858ac9512f 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -32,18 +32,18 @@
 				VMXNET3_MAX_RX_QUEUES + 1)
 
 #define VMXNET3_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 #define VMXNET3_V4_RSS_MASK ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define VMXNET3_MANDATORY_V4_RSS ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 /* RSS configuration structure - shared with device through GPA */
 typedef struct VMXNET3_RSSConf {
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index b01c4c01f9c9..870100fa4f11 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1326,13 +1326,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	rss_hf = port_rss_conf->rss_hf &
 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1389,13 +1389,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
 	/* loading hashType */
 	dev_rss_conf->hashType = 0;
 	rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
 	return VMXNET3_SUCCESS;
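
vmxnet3_rss_configure() above translates the renamed RTE_ETH_RSS_* bits into
device hash types. The same bits feed the runtime hash-update API; a minimal
sketch (illustrative only, not part of this patch):

#include <rte_ethdev.h>

/* Restrict RSS to TCP flows using the renamed hash-function bits;
 * a NULL key keeps the key currently programmed on the port.
 */
static int
rss_tcp_only(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}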
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 68e3c13730ad..a9fef2297842 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -71,11 +71,11 @@ mbuf_input(struct rte_mbuf *mbuf)
 
 static const struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -328,7 +328,7 @@ check_port_link_status(uint16_t port_id)
 
 		if (link_get_err >= 0 && link.link_status) {
 			const char *dp = (link.link_duplex ==
-				ETH_LINK_FULL_DUPLEX) ?
+				RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex";
 			printf("\nPort %u Link Up - speed %s - %s\n",
 				port_id,
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 6352a715c0d9..3f41d8e5965d 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -115,17 +115,17 @@ static struct rte_mempool *mbuf_pool;
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -149,9 +149,9 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
@@ -241,9 +241,9 @@ bond_port_init(struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			BOND_PORT, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
 	if (retval != 0)
 		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 8c4a8feec0c2..c681e237ea46 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -80,15 +80,15 @@ struct app_stats prev_app_stats;
 
 static const struct rte_eth_conf port_conf_default = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		}
 	},
 };
@@ -126,9 +126,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 1bc675962bf3..cdd9e9b60bd8 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -98,7 +98,7 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
 	int ret;
 
 	memset(&cfg_port, 0, sizeof(cfg_port));
-	cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
+	cfg_port.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
 
 	for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
 		struct app_port *ptr_port = &app_cfg->ports[idx_port];
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 413251630709..e7cdf8d5775b 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -233,13 +233,13 @@ rte_ethtool_get_pauseparam(uint16_t port_id,
 	pause_param->tx_pause = 0;
 	pause_param->rx_pause = 0;
 	switch (fc_conf.mode) {
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		pause_param->rx_pause = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		pause_param->tx_pause = 1;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_param->rx_pause = 1;
 		pause_param->tx_pause = 1;
 	default:
@@ -277,14 +277,14 @@ rte_ethtool_set_pauseparam(uint16_t port_id,
 
 	if (pause_param->tx_pause) {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_FULL;
+			fc_conf.mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf.mode = RTE_FC_TX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
 	} else {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_RX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_RX_PAUSE;
 		else
-			fc_conf.mode = RTE_FC_NONE;
+			fc_conf.mode = RTE_ETH_FC_NONE;
 	}
 
 	status = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
@@ -398,12 +398,12 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
 	for (vf = 0; vf < num_vfs; vf++) {
 #ifdef RTE_NET_IXGBE
 		rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
-			ETH_VMDQ_ACCEPT_UNTAG, 0);
+			RTE_ETH_VMDQ_ACCEPT_UNTAG, 0);
 #endif
 	}
 
 	/* Enable Rx VLAN filter; on VFs that do not support it the status is discarded */
-	ret = rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
+	ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
 	if (ret != 0)
 		return ret;
 
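The pauseparam hunks above map ethtool's rx/tx pause pair onto the renamed
RTE_ETH_FC_* modes. A minimal sketch of the same mapping from the caller's
side (illustrative only; the helper name set_full_flow_ctrl is hypothetical):

#include <rte_ethdev.h>

/* Read-modify-write the flow control configuration so fields other than
 * the mode, such as the high/low watermarks, are preserved.
 */
static int
set_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL;	/* was RTE_FC_FULL */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}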
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index e26be8edf28f..193a16463449 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -283,13 +283,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -311,12 +311,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 476b147bdfcc..1b841d46ad93 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -614,13 +614,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -642,9 +642,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
 
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index 8a43f6ac0f92..6185b340600c 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -212,9 +212,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index dd8a33d036ee..bfc1949c8428 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -113,7 +113,7 @@ assert_link_status(void)
 	memset(&link, 0, sizeof(link));
 	do {
 		link_get_err = rte_eth_link_get(port_id, &link);
-		if (link_get_err == 0 && link.link_status == ETH_LINK_UP)
+		if (link_get_err == 0 && link.link_status == RTE_ETH_LINK_UP)
 			break;
 		rte_delay_ms(CHECK_INTERVAL);
 	} while (--rep_cnt);
@@ -121,7 +121,7 @@ assert_link_status(void)
 	if (link_get_err < 0)
 		rte_exit(EXIT_FAILURE, ":: error: link get is failing: %s\n",
 			 rte_strerror(-link_get_err));
-	if (link.link_status == ETH_LINK_DOWN)
+	if (link.link_status == RTE_ETH_LINK_DOWN)
 		rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
 }
 
@@ -138,12 +138,12 @@ init_port(void)
 		},
 		.txmode = {
 			.offloads =
-				DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO,
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO,
 		},
 	};
 	struct rte_eth_txconf txq_conf;
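
assert_link_status() above polls with the renamed RTE_ETH_LINK_UP/DOWN
values. A standalone variant of the same loop (illustrative only, not part
of this patch):

#include <errno.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Poll the link without blocking for autonegotiation; return 0 once the
 * link reports up, -ETIMEDOUT after max_tries attempts.
 */
static int
wait_link_up(uint16_t port_id, int max_tries)
{
	struct rte_eth_link link;

	while (max_tries-- > 0) {
		if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
		    link.link_status == RTE_ETH_LINK_UP)
			return 0;
		rte_delay_ms(100);
	}
	return -ETIMEDOUT;
}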
diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index ccfee585f850..b1aa2767a0af 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -819,12 +819,12 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	/* Configuring port to use RSS for multiple RX queues. 8< */
 	static const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_PROTO_MASK,
+				.rss_hf = RTE_ETH_RSS_PROTO_MASK,
 			}
 		}
 	};
@@ -852,9 +852,9 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device:"
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 8644454a9aef..0307709f2b4a 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -149,13 +149,13 @@ static struct rte_eth_conf port_conf = {
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_SCATTER),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_SCATTER),
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -624,7 +624,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
index 9ba02e687adb..0290767af473 100644
--- a/examples/ip_pipeline/link.c
+++ b/examples/ip_pipeline/link.c
@@ -45,7 +45,7 @@ link_next(struct link *link)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -57,12 +57,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -77,11 +77,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -139,7 +139,7 @@ link_create(const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -157,9 +157,9 @@ link_create(const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -267,5 +267,5 @@ link_is_up(const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
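
rss_setup() above fills one rte_eth_rss_reta_entry64 per
RTE_ETH_RETA_GROUP_SIZE indirection entries. A self-contained round-robin
variant of the same logic (illustrative only, assuming reta_size does not
exceed RTE_ETH_RSS_RETA_SIZE_512):

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
reta_round_robin(uint16_t port_id, uint16_t reta_size, uint16_t n_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint32_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		/* Each 64-entry group carries its own validity mask. */
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta
			[i % RTE_ETH_RETA_GROUP_SIZE] = i % n_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}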
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 4f0e12e62447..a9f9bd477007 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -161,22 +161,22 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -738,7 +738,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1096,9 +1096,9 @@ main(int argc, char **argv)
 		n_tx_queue = nb_lcores;
 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5f5ec260f315..feddd84d1551 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -234,19 +234,19 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1455,10 +1455,10 @@ print_usage(const char *prgname)
 		"               \"parallel\" : Parallel\n"
 		"  --" CMD_LINE_OPT_RX_OFFLOAD
 		": bitmask of the RX HW offload capabilities to enable/use\n"
-		"                         (DEV_RX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_TX_OFFLOAD
 		": bitmask of the TX HW offload capabilities to enable/use\n"
-		"                         (DEV_TX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
 		": max number of entries in reassemble(fragment) table\n"
 		"    (zero (default value) disables reassembly)\n"
@@ -1909,7 +1909,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2212,8 +2212,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 	local_port_conf.rxmode.mtu = mtu_size;
 
 	if (multi_seg_required()) {
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	local_port_conf.rxmode.offloads |= req_rx_offloads;
@@ -2236,12 +2236,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 			portid, local_port_conf.txmode.offloads,
 			dev_info.tx_offload_capa);
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	printf("port %u configurng rx_offloads=0x%" PRIx64
 		", tx_offloads=0x%" PRIx64 "\n",
@@ -2299,7 +2299,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 		/* Pre-populate pkt offloads based on capabilities */
 		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
 		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
-		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
 
 		tx_queueid++;
@@ -2660,7 +2660,7 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
 	struct rte_flow *flow;
 	int ret;
 
-	if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 17a28556c971..5cdd794f017f 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -986,7 +986,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	if (inbound) {
 		if ((dev_info.rx_offload_capa &
-				DEV_RX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware RX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -994,7 +994,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	} else { /* outbound */
 		if ((dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware TX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -1628,7 +1628,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 	}
 
 	/* Check for outbound rules that use offloads and use this port */
@@ -1639,7 +1639,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
 	}
 	return 0;
 }
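
check_eth_dev_caps() above gates inline IPsec on the renamed
RTE_ETH_RX/TX_OFFLOAD_SECURITY bits. A minimal capability probe in the same
spirit (illustrative only; the helper name port_has_inline_security is
hypothetical):

#include <rte_ethdev.h>

/* Return nonzero when the port can do inline security in the given
 * direction (inbound = Rx, outbound = Tx).
 */
static int
port_has_inline_security(uint16_t port_id, int inbound)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	return inbound ?
		!!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY) :
		!!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SECURITY);
}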
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 87538dccc879..32670f80bc2b 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -115,8 +115,8 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	},
 };
 
@@ -620,7 +620,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 1790ec024072..f780be712ec0 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -95,7 +95,7 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
 /* Options for configuring ethernet port */
 static struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -608,9 +608,9 @@ init_port(uint16_t port)
 			"Error during getting device (port %u) info: %s\n",
 			port, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
@@ -688,7 +688,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index c646f1748ca7..42c04abbbb34 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -216,11 +216,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1808,7 +1808,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2632,9 +2632,9 @@ initialize_ports(struct l2fwd_crypto_options *options)
 			return retval;
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (retval < 0) {
 			printf("Cannot configure device: err=%d, port=%u\n",
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index 9040be5ed9b6..cf3d1b8aaf40 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -14,7 +14,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 			.split_hdr_size = 0,
 		},
 		.txmode = {
-			.mq_mode = ETH_MQ_TX_NONE,
+			.mq_mode = RTE_ETH_MQ_TX_NONE,
 		},
 	};
 	uint16_t nb_ports_available = 0;
@@ -22,9 +22,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 	int ret;
 
 	if (rsrc->event_mode) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-		port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+		port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	}
 
 	/* Initialise each port */
@@ -60,9 +60,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queue. 8< */
 		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 1db89f2bd139..9806204b81d1 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -395,7 +395,7 @@ check_all_ports_link_status(struct l2fwd_resources *rsrc,
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 06280321b1f2..092ea0189c7f 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -94,7 +94,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -726,7 +726,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -869,9 +869,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 07271affb4a9..78e43f9c091e 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -478,7 +478,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -650,9 +650,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE,
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index f3deeba0a665..3edabd1dd19b 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -95,7 +95,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -606,7 +606,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -792,9 +792,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the number of queues for a port. */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 1890c88a5b01..fea414ae5929 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -124,19 +124,19 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1936,7 +1936,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2004,7 +2004,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -2088,9 +2088,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
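
The config_port_max_pkt_len() hunks in the l3fwd examples derive the MTU from
a requested frame length and turn on multi-segment Tx once the result exceeds
the standard Ethernet MTU. A condensed sketch of that rule (illustrative only;
the fixed header-plus-CRC overhead is an assumption for ports that report no
port-specific value):

#include <rte_ethdev.h>
#include <rte_ether.h>

static void
set_jumbo_mtu(struct rte_eth_conf *conf, uint32_t max_pkt_len)
{
	uint32_t overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	conf->rxmode.mtu = max_pkt_len - overhead_len;
	/* Jumbo frames may not fit in one mbuf, so allow chained Tx. */
	if (conf->rxmode.mtu > RTE_ETHER_MTU)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}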
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 05385807e83e..7f00c65609ed 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,17 +111,17 @@ static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -607,7 +607,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* Clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -731,7 +731,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -828,9 +828,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 6aa1b66ecfcc..5a4359a368b5 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -250,18 +250,18 @@ uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_UDP,
+			.rss_hf = RTE_ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	}
 };
 
@@ -2197,7 +2197,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2510,7 +2510,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -2638,9 +2638,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 961860ea18ef..7c7613a83aad 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -75,9 +75,9 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
 			rte_panic("Error during getting device (port %u) info:"
 				  "%s\n", port_id, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+						RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 						dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index f27c76bb7a73..51cbf81f1afa 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -120,18 +120,18 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -903,7 +903,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -988,7 +988,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1053,15 +1053,15 @@ l3fwd_poll_resource_setup(void)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
 
 		if (dev_info.max_rx_queues == 1)
-			local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 
 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
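
The max_rx_queues fallback above matters because RSS has nothing to
spread on a single-queue device; roughly:

#include <rte_ethdev.h>

/* Minimal sketch of the fallback used above. */
static void
fixup_rx_mq_mode(const struct rte_eth_dev_info *dev_info,
		 struct rte_eth_conf *conf)
{
	if (dev_info->max_rx_queues == 1)
		conf->rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
}
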
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index e4542df11f87..8714acddd110 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -83,7 +83,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.intr_conf = {
 		.lsc = 1, /**< lsc interrupt feature enabled */
@@ -147,7 +147,7 @@ print_stats(void)
 			   link_get_err < 0 ? "0" :
 			   rte_eth_link_speed_to_str(link.link_speed),
 			   link_get_err < 0 ? "Link get failed" :
-			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+			   (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex"),
 			   port_statistics[portid].tx,
 			   port_statistics[portid].rx,
@@ -507,7 +507,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -634,9 +634,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
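
check_all_ports_link_status(), touched in several examples here, boils
down to the loop below; this is a condensed sketch with illustrative
names, not the exact example code:

#include <string.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Poll until every masked port reports link up, or give up after
 * max_checks rounds of 100 ms. */
static void
wait_links_up(uint32_t port_mask, unsigned int max_checks)
{
	struct rte_eth_link link;
	unsigned int n;
	uint16_t portid;

	for (n = 0; n < max_checks; n++) {
		int all_up = 1;

		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1u << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			if (rte_eth_link_get_nowait(portid, &link) < 0 ||
			    link.link_status == RTE_ETH_LINK_DOWN) {
				all_up = 0;
				break;
			}
		}
		if (all_up)
			break;
		rte_delay_ms(100);
	}
}
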
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 1ad71ca7ec5f..23307073c904 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -94,7 +94,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS
+			.mq_mode = RTE_ETH_MQ_RX_RSS
 		}
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_clients;
@@ -213,7 +213,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 01dc3acf34d5..85955375f1bf 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -176,18 +176,18 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 {
 	struct rte_eth_conf port_conf = {
 			.rxmode = {
-				.mq_mode	= ETH_MQ_RX_RSS,
+				.mq_mode	= RTE_ETH_MQ_RX_RSS,
 				.split_hdr_size = 0,
-				.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+				.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 			},
 			.rx_adv_conf = {
 				.rss_conf = {
 					.rss_key = NULL,
-					.rss_hf = ETH_RSS_IP,
+					.rss_hf = RTE_ETH_RSS_IP,
 				},
 			},
 			.txmode = {
-				.mq_mode = ETH_MQ_TX_NONE,
+				.mq_mode = RTE_ETH_MQ_TX_NONE,
 			}
 	};
 	const uint16_t rx_rings = num_queues, tx_rings = num_queues;
@@ -218,9 +218,9 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 
 	info.default_rxconf.rx_drop_en = 1;
 
-	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
@@ -392,7 +392,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
index e9a388710647..f110fc129f55 100644
--- a/examples/ntb/ntb_fwd.c
+++ b/examples/ntb/ntb_fwd.c
@@ -89,17 +89,17 @@ static uint16_t pkt_burst = NTB_DFLT_PKT_BURST;
 
 static struct rte_eth_conf eth_port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 4f6982bc1289..b01ac60fd196 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -294,9 +294,9 @@ configure_eth_port(uint16_t port_id)
 		return ret;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
 	if (ret != 0)
 		return ret;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 5de5df997ee9..baeee9298d57 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -307,18 +307,18 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_TCP,
+			.rss_hf = RTE_ETH_RSS_TCP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -3441,7 +3441,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3494,7 +3494,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -3593,9 +3593,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 4f20dfc4be06..569207a79d62 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -133,7 +133,7 @@ mempool_find(struct obj *obj, const char *name)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -145,12 +145,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -165,11 +165,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -227,7 +227,7 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -245,9 +245,9 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -356,7 +356,7 @@ link_is_up(struct obj *obj, const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
 
 struct link *
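
The rss_setup() change above shows how the renamed RETA constants fit
together: indirection-table entries come in groups of
RTE_ETH_RETA_GROUP_SIZE (64), each group carries its own validity mask,
and queues are assigned round-robin. A self-contained sketch
(spread_reta() is an illustrative name), assuming reta_size comes from
rte_eth_dev_info.reta_size:

#include <string.h>
#include <rte_bitops.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t n_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint32_t id = i / RTE_ETH_RETA_GROUP_SIZE;
		uint32_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[id].mask |= RTE_BIT64(pos);
		reta_conf[id].reta[pos] = i % n_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
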
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 229a277032cb..979d9eb9e9d0 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -193,14 +193,14 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Force full Tx path in the driver, required for IEEE1588 */
-	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index c32d2e12e633..743bae2da50a 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -51,18 +51,18 @@ static struct rte_mempool *pool = NULL;
  ***/
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -332,8 +332,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_rx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
@@ -378,8 +378,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_tx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 1367569c65db..9b34e4a76b1b 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -60,7 +60,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -105,9 +105,9 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE,
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 6845c396b8d9..1903d8b095a1 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -141,17 +141,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (hw_timestamping) {
-		if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
+		if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 			printf("\nERROR: Port %u does not support hardware timestamping\n"
 					, port);
 			return -1;
 		}
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
 		if (hwts_dynfield_offset < 0) {
 			printf("ERROR: Failed to register timestamp field\n");
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index 9ebd88bac20e..074fee5b26b2 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -96,7 +96,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_nodes;
@@ -115,9 +115,9 @@ init_port(uint16_t port_num)
 	if (retval != 0)
 		return retval;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/*
 	 * Standard DPDK port initialisation - config port, then set up
@@ -277,7 +277,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index fd7207aee758..16435ee3ccc2 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -49,9 +49,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
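
skeleton/basicfwd keeps the smallest possible bring-up; under the new
namespace it reduces to roughly the sketch below (one queue each way,
error paths trimmed, names illustrative):

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
port_init_sketch(uint16_t port, struct rte_mempool *pool)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
	};
	int socket = rte_eth_dev_socket_id(port);
	int ret;

	ret = rte_eth_dev_configure(port, 1, 1, &conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port, 0, 512, socket, NULL, pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port, 0, 512, socket, NULL);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port);
}
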
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 999809e6ed41..49c134a3042f 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -110,23 +110,23 @@ static int nb_sockets;
 /* empty vmdq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 		/*
 		 * VLAN strip is necessary for 1G NICs such as I350;
 		 * it fixes an IPv4 forwarding bug where a guest could not
 		 * forward packets from one virtio dev to another.
 		 */
-		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
+		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM |
-			     DEV_TX_OFFLOAD_VLAN_INSERT |
-			     DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_TCP_TSO),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO),
 	},
 	.rx_adv_conf = {
 		/*
@@ -134,7 +134,7 @@ static struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -291,9 +291,9 @@ port_init(uint16_t port)
 		return -1;
 
 	rx_rings = (uint16_t)dev_info.max_rx_queues;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0) {
@@ -557,8 +557,8 @@ us_vhost_parse_args(int argc, char **argv)
 		case 'P':
 			promiscuous = 1;
 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
-				ETH_VMDQ_ACCEPT_BROADCAST |
-				ETH_VMDQ_ACCEPT_MULTICAST;
+				RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+				RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 			break;
 
 		case OPT_VM2VM_NUM:
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index e19d79a40802..b159291d77ce 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -73,9 +73,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -270,7 +270,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 		       /* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index ee7f4324e141..1f336082e5c1 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -66,12 +66,12 @@ static uint8_t rss_enable;
 /* empty vmdq configuration structure. Filled in programmatically */
 static const struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		/*
@@ -79,7 +79,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -157,11 +157,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -259,9 +259,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
 	if (retval != 0)
 		return retval;
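
The rss_enable branch above layers RSS on top of VMDq; condensed, and
with the resulting rss_hf still to be masked by the port's
flow_type_rss_offloads as the example does:

#include <rte_ethdev.h>

/* Illustrative helper, not part of this patch. */
static void
enable_vmdq_rss(struct rte_eth_conf *conf)
{
	conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
	conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
					    RTE_ETH_RSS_UDP |
					    RTE_ETH_RSS_TCP |
					    RTE_ETH_RSS_SCTP;
}
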
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 14c20e6a8b26..1a19f1799bd2 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -60,8 +60,8 @@ static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports;
 
 /* number of pools (32 by default if user does not specify any) */
-static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
-static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
+static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
+static enum rte_eth_nb_tcs   num_tcs   = RTE_ETH_4_TCS;
 static uint16_t num_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
 static uint8_t rss_enable;
@@ -69,11 +69,11 @@ static uint8_t rss_enable;
 /* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
 static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_DCB,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
+		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
 	},
 	/*
 	 * should be overridden separately in code with
@@ -81,7 +81,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	 */
 	.rx_adv_conf = {
 		.vmdq_dcb_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -89,12 +89,12 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 			.dcb_tc = {0},
 		},
 		.dcb_rx_conf = {
-				.nb_tcs = ETH_4_TCS,
+				.nb_tcs = RTE_ETH_4_TCS,
 				/** Traffic class each UP is mapped to. */
 				.dcb_tc = {0},
 		},
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -103,7 +103,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	},
 	.tx_adv_conf = {
 		.vmdq_dcb_tx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.dcb_tc = {0},
 		},
 	},
@@ -157,7 +157,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 		conf.pool_map[i].pools = 1UL << i;
 		vmdq_conf.pool_map[i].pools = 1UL << i;
 	}
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		conf.dcb_tc[i] = i % num_tcs;
 		dcb_conf.dcb_tc[i] = i % num_tcs;
 		tx_conf.dcb_tc[i] = i % num_tcs;
@@ -173,11 +173,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
 			  sizeof(tx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -271,9 +271,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
@@ -382,9 +382,9 @@ vmdq_parse_num_pools(const char *q_arg)
 	if (n != 16 && n != 32)
 		return -1;
 	if (n == 16)
-		num_pools = ETH_16_POOLS;
+		num_pools = RTE_ETH_16_POOLS;
 	else
-		num_pools = ETH_32_POOLS;
+		num_pools = RTE_ETH_32_POOLS;
 
 	return 0;
 }
@@ -404,9 +404,9 @@ vmdq_parse_num_tcs(const char *q_arg)
 	if (n != 4 && n != 8)
 		return -1;
 	if (n == 4)
-		num_tcs = ETH_4_TCS;
+		num_tcs = RTE_ETH_4_TCS;
 	else
-		num_tcs = ETH_8_TCS;
+		num_tcs = RTE_ETH_8_TCS;
 
 	return 0;
 }
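
The dcb_tc loop patched above distributes all
RTE_ETH_DCB_NUM_USER_PRIORITIES (8) user priorities over the configured
TC count, modulo-style. A sketch (map_up_to_tc() is an illustrative
name):

#include <rte_ethdev.h>

static void
map_up_to_tc(struct rte_eth_vmdq_dcb_conf *conf, enum rte_eth_nb_tcs num_tcs)
{
	unsigned int i;

	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
		conf->dcb_tc[i] = i % num_tcs;
}
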
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 0174ba03d7f3..c134b878684e 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -116,7 +116,7 @@ struct rte_eth_dev_data {
 			/**< Device Ethernet link address.
 			 *   @see rte_eth_dev_release_port()
 			 */
-	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+	uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
 			/**< Bitmap associating MAC addresses to pools. */
 	struct rte_ether_addr *hash_mac_addrs;
 			/**< Device Ethernet MAC addresses of hash filtering.
@@ -1657,23 +1657,23 @@ struct rte_eth_syn_filter {
 /**
  * Filter types for tunneled packets.
  */
-#define ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
-#define ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP Addr */
-#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
-#define ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
-#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
-#define ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
-
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN)
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
-					ETH_TUNNEL_FILTER_TENID | \
-					ETH_TUNNEL_FILTER_IMAC)
+#define RTE_ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP addr */
+#define RTE_ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
+#define RTE_ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
+#define RTE_ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
+
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_IVLAN)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+						RTE_ETH_TUNNEL_FILTER_IVLAN | \
+						RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC (RTE_ETH_TUNNEL_FILTER_OMAC | \
+					       RTE_ETH_TUNNEL_FILTER_TENID | \
+					       RTE_ETH_TUNNEL_FILTER_IMAC)
 
 /**
  *  Select IPv4 or IPv6 for tunnel filters.
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 1f18aa916cca..7fd916c070e9 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -101,9 +101,6 @@ static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
 
 static const struct {
@@ -128,14 +125,14 @@ static const struct {
 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
-	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
 };
 
 #undef RTE_RX_OFFLOAD_BIT2STR
 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
 
 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_TX_OFFLOAD_##_name, #_name }
+	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
 
 static const struct {
 	uint64_t offload;
@@ -1173,32 +1170,32 @@ uint32_t
 rte_eth_speed_bitflag(uint32_t speed, int duplex)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
-		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
-	case ETH_SPEED_NUM_100M:
-		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
-	case ETH_SPEED_NUM_1G:
-		return ETH_LINK_SPEED_1G;
-	case ETH_SPEED_NUM_2_5G:
-		return ETH_LINK_SPEED_2_5G;
-	case ETH_SPEED_NUM_5G:
-		return ETH_LINK_SPEED_5G;
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10M:
+		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+	case RTE_ETH_SPEED_NUM_100M:
+		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+	case RTE_ETH_SPEED_NUM_1G:
+		return RTE_ETH_LINK_SPEED_1G;
+	case RTE_ETH_SPEED_NUM_2_5G:
+		return RTE_ETH_LINK_SPEED_2_5G;
+	case RTE_ETH_SPEED_NUM_5G:
+		return RTE_ETH_LINK_SPEED_5G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
@@ -1503,7 +1500,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t max_rx_pktlen;
 		uint32_t overhead_len;
 
@@ -1560,12 +1557,12 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
-	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
-	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
 			port_id,
-			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -2180,7 +2177,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * size is supported by the configured device.
 	 */
 	/* Get the real Ethernet overhead length */
-	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t overhead_len;
 		uint32_t max_rx_pktlen;
 		int ret;
@@ -2760,21 +2757,21 @@ const char *
 rte_eth_link_speed_to_str(uint32_t link_speed)
 {
 	switch (link_speed) {
-	case ETH_SPEED_NUM_NONE: return "None";
-	case ETH_SPEED_NUM_10M:  return "10 Mbps";
-	case ETH_SPEED_NUM_100M: return "100 Mbps";
-	case ETH_SPEED_NUM_1G:   return "1 Gbps";
-	case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
-	case ETH_SPEED_NUM_5G:   return "5 Gbps";
-	case ETH_SPEED_NUM_10G:  return "10 Gbps";
-	case ETH_SPEED_NUM_20G:  return "20 Gbps";
-	case ETH_SPEED_NUM_25G:  return "25 Gbps";
-	case ETH_SPEED_NUM_40G:  return "40 Gbps";
-	case ETH_SPEED_NUM_50G:  return "50 Gbps";
-	case ETH_SPEED_NUM_56G:  return "56 Gbps";
-	case ETH_SPEED_NUM_100G: return "100 Gbps";
-	case ETH_SPEED_NUM_200G: return "200 Gbps";
-	case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+	case RTE_ETH_SPEED_NUM_NONE: return "None";
+	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
+	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
+	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
+	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
+	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
+	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
+	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
+	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
+	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
+	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
 	default: return "Invalid";
 	}
 }
@@ -2798,14 +2795,14 @@ rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
 		return -EINVAL;
 	}
 
-	if (eth_link->link_status == ETH_LINK_DOWN)
+	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
 		return snprintf(str, len, "Link down");
 	else
 		return snprintf(str, len, "Link up at %s %s %s",
 			rte_eth_link_speed_to_str(eth_link->link_speed),
-			(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			"FDX" : "HDX",
-			(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 			"Autoneg" : "Fixed");
 }
 
@@ -3712,7 +3709,7 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
 	dev = &rte_eth_devices[port_id];
 
 	if (!(dev->data->dev_conf.rxmode.offloads &
-	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
 			port_id);
 		return -ENOSYS;
@@ -3799,44 +3796,44 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	dev_offloads = orig_offloads;
 
 	/* check which option changed by application */
-	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
-		mask |= ETH_VLAN_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		mask |= RTE_ETH_VLAN_STRIP_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
-		mask |= ETH_VLAN_FILTER_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+		mask |= RTE_ETH_VLAN_FILTER_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
-		mask |= ETH_VLAN_EXTEND_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+		mask |= RTE_ETH_VLAN_EXTEND_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
-		mask |= ETH_QINQ_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+		mask |= RTE_ETH_QINQ_STRIP_MASK;
 	}
 
 	/* no change */
@@ -3881,17 +3878,17 @@ rte_eth_dev_get_vlan_offload(uint16_t port_id)
 	dev = &rte_eth_devices[port_id];
 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		ret |= ETH_VLAN_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		ret |= ETH_VLAN_FILTER_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-		ret |= ETH_VLAN_EXTEND_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
-		ret |= ETH_QINQ_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
 
 	return ret;
 }
@@ -3968,7 +3965,7 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
 		return -EINVAL;
 	}
@@ -3986,7 +3983,7 @@ eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
 {
 	uint16_t i, num;
 
-	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
+	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
 	for (i = 0; i < num; i++) {
 		if (reta_conf[i].mask)
 			return 0;
@@ -4008,8 +4005,8 @@ eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
 			(reta_conf[idx].reta[shift] >= max_rxq)) {
 			RTE_ETHDEV_LOG(ERR,
@@ -4165,7 +4162,7 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4191,7 +4188,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4332,8 +4329,8 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 			port_id);
 		return -EINVAL;
 	}
-	if (pool >= ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
+	if (pool >= RTE_ETH_64_POOLS) {
+		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
@@ -6242,7 +6239,7 @@ eth_dev_handle_port_link_status(const char *cmd __rte_unused,
 	rte_tel_data_add_dict_string(d, status_str, "UP");
 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
 	rte_tel_data_add_dict_string(d, "duplex",
-			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex");
 	return 0;
 }
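
The speed and link renames in this file tie together as below; a sketch
only (force_10g_and_report() is an illustrative name), assuming the port
is reconfigured and restarted elsewhere for link_speeds to take effect:

#include <stdio.h>
#include <rte_ethdev.h>

static void
force_10g_and_report(uint16_t port_id, struct rte_eth_conf *conf)
{
	char text[RTE_ETH_LINK_MAX_STR_LEN];
	struct rte_eth_link link;

	/* Request a fixed 10G full-duplex link. */
	conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			    rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
						  RTE_ETH_LINK_FULL_DUPLEX);

	/* Render the current state with the standard helper. */
	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
		rte_eth_link_to_str(text, sizeof(text), &link);
		printf("Port %u: %s\n", port_id, text);
	}
}
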
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 014270d31672..9f0addee116c 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -250,7 +250,7 @@ void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
  * field is not supported, its value is 0.
  * All byte-related statistics do not include Ethernet FCS regardless
  * of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
  */
 struct rte_eth_stats {
 	uint64_t ipackets;  /**< Total number of successfully received packets. */
@@ -280,43 +280,75 @@ struct rte_eth_stats {
 /**@{@name Link speed capabilities
  * Device supported speeds bitmap flags
  */
-#define ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
-#define ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
-#define ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
-#define ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
-#define ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
-#define ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
-#define ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
-#define ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
-#define ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG	RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED	RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD	RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M	RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD	RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M	RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
+#define ETH_LINK_SPEED_1G	RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G	RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
+#define ETH_LINK_SPEED_5G	RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
+#define ETH_LINK_SPEED_10G	RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
+#define ETH_LINK_SPEED_20G	RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
+#define ETH_LINK_SPEED_25G	RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
+#define ETH_LINK_SPEED_40G	RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
+#define ETH_LINK_SPEED_50G	RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
+#define ETH_LINK_SPEED_56G	RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G	RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G	RTE_ETH_LINK_SPEED_200G
 /**@}*/
 
 /**@{@name Link speed
  * Ethernet numeric link speeds in Mbps
  */
-#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
-#define ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
-#define ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
-#define ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
-#define ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
-#define ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
-#define ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
-#define ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
-#define ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
-#define ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
-#define ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE	RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
+#define ETH_SPEED_NUM_10M	RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M	RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
+#define ETH_SPEED_NUM_1G	RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G	RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
+#define ETH_SPEED_NUM_5G	RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
+#define ETH_SPEED_NUM_10G	RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
+#define ETH_SPEED_NUM_20G	RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
+#define ETH_SPEED_NUM_25G	RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
+#define ETH_SPEED_NUM_40G	RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
+#define ETH_SPEED_NUM_50G	RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
+#define ETH_SPEED_NUM_56G	RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G	RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G	RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN	RTE_ETH_SPEED_NUM_UNKNOWN
 /**@}*/
 
 /**
@@ -324,21 +356,27 @@ struct rte_eth_stats {
  */
 __extension__
 struct rte_eth_link {
-	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
-	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
-	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;        /**< RTE_ETH_SPEED_NUM_ */
+	uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
 
 /**@{@name Link negotiation
  * Constants used in link management.
  */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX	RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX	RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN		RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP		RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED		RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG	RTE_ETH_LINK_AUTONEG
 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
 /**@}*/
 
@@ -355,9 +393,12 @@ struct rte_eth_thresh {
 /**@{@name Multi-queue mode
  * @see rte_eth_conf.rxmode.mq_mode.
  */
-#define ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
-#define ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
-#define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define RTE_ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
+#define ETH_MQ_RX_RSS_FLAG	RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
+#define ETH_MQ_RX_DCB_FLAG	RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define ETH_MQ_RX_VMDQ_FLAG	RTE_ETH_MQ_RX_VMDQ_FLAG
 /**@}*/
 
 /**
@@ -366,50 +407,49 @@ struct rte_eth_thresh {
  */
 enum rte_eth_rx_mq_mode {
 	/** None of DCB, RSS or VMDQ mode */
-	ETH_MQ_RX_NONE = 0,
+	RTE_ETH_MQ_RX_NONE = 0,
 
 	/** For RX side, only RSS is on */
-	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
 	/** For RX side, only DCB is on. */
-	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Both DCB and RSS enabled */
-	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 
 	/** Only VMDQ, no RSS nor DCB */
-	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** RSS mode with VMDQ */
-	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** Use VMDQ+DCB to route traffic to queues */
-	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Enable both VMDQ and DCB in VMDq */
-	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
-				 ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+				     RTE_ETH_MQ_RX_VMDQ_FLAG,
 };
 
-/**
- * for rx mq mode backward compatible
- */
-#define ETH_RSS                       ETH_MQ_RX_RSS
-#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX                    ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE		RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS		RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB		RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS	RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY	RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS	RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB	RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS	RTE_ETH_MQ_RX_VMDQ_DCB_RSS
 
 /**
  * A set of values to identify what method is to be used to transmit
  * packets using multi-TCs.
  */
 enum rte_eth_tx_mq_mode {
-	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
-	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
-	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
+	RTE_ETH_MQ_TX_NONE    = 0,  /**< Neither DCB nor VT mode. */
+	RTE_ETH_MQ_TX_DCB,          /**< For TX side, only DCB is on. */
+	RTE_ETH_MQ_TX_VMDQ_DCB,     /**< For TX side, both DCB and VT are on. */
+	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
-
-/**
- * for tx mq mode backward compatible
- */
-#define ETH_DCB_NONE                ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX                  ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE		RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB		RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB	RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY	RTE_ETH_MQ_TX_VMDQ_ONLY
 
 /**
  * A structure used to configure the RX features of an Ethernet port.
@@ -422,7 +462,7 @@ struct rte_eth_rxmode {
 	uint32_t max_lro_pkt_size;
 	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
 	/**
-	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -437,12 +477,17 @@ struct rte_eth_rxmode {
  * Note that single VLAN is treated the same as inner VLAN.
  */
 enum rte_vlan_type {
-	ETH_VLAN_TYPE_UNKNOWN = 0,
-	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
-	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
-	ETH_VLAN_TYPE_MAX,
+	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+	RTE_ETH_VLAN_TYPE_MAX,
 };
 
+#define ETH_VLAN_TYPE_UNKNOWN	RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER	RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER	RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX	RTE_ETH_VLAN_TYPE_MAX
+
 /**
  * A structure used to describe a vlan filter.
  * If the bit corresponding to a VID is set, such VID is on.
@@ -513,38 +558,70 @@ struct rte_eth_rss_conf {
  * Below macros are defined for RSS offload types, they can be used to
  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
  */
-#define ETH_RSS_IPV4               RTE_BIT64(2)
-#define ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
-#define ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
-#define ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
-#define ETH_RSS_IPV6               RTE_BIT64(8)
-#define ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
-#define ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
-#define ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
-#define ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
-#define ETH_RSS_IPV6_EX            RTE_BIT64(15)
-#define ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
-#define ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
-#define ETH_RSS_PORT               RTE_BIT64(18)
-#define ETH_RSS_VXLAN              RTE_BIT64(19)
-#define ETH_RSS_GENEVE             RTE_BIT64(20)
-#define ETH_RSS_NVGRE              RTE_BIT64(21)
-#define ETH_RSS_GTPU               RTE_BIT64(23)
-#define ETH_RSS_ETH                RTE_BIT64(24)
-#define ETH_RSS_S_VLAN             RTE_BIT64(25)
-#define ETH_RSS_C_VLAN             RTE_BIT64(26)
-#define ETH_RSS_ESP                RTE_BIT64(27)
-#define ETH_RSS_AH                 RTE_BIT64(28)
-#define ETH_RSS_L2TPV3             RTE_BIT64(29)
-#define ETH_RSS_PFCP               RTE_BIT64(30)
-#define ETH_RSS_PPPOE              RTE_BIT64(31)
-#define ETH_RSS_ECPRI              RTE_BIT64(32)
-#define ETH_RSS_MPLS               RTE_BIT64(33)
-#define ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
+#define RTE_ETH_RSS_IPV4               RTE_BIT64(2)
+#define ETH_RSS_IPV4                   RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
+#define ETH_RSS_FRAG_IPV4              RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
+#define ETH_RSS_NONFRAG_IPV4_TCP       RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
+#define ETH_RSS_NONFRAG_IPV4_UDP       RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP      RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER     RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6               RTE_BIT64(8)
+#define ETH_RSS_IPV6                   RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
+#define ETH_RSS_FRAG_IPV6              RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
+#define ETH_RSS_NONFRAG_IPV6_TCP       RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
+#define ETH_RSS_NONFRAG_IPV6_UDP       RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP      RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER     RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
+#define ETH_RSS_L2_PAYLOAD             RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX            RTE_BIT64(15)
+#define ETH_RSS_IPV6_EX                RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
+#define ETH_RSS_IPV6_TCP_EX            RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
+#define ETH_RSS_IPV6_UDP_EX            RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT               RTE_BIT64(18)
+#define ETH_RSS_PORT                   RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN              RTE_BIT64(19)
+#define ETH_RSS_VXLAN                  RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE             RTE_BIT64(20)
+#define ETH_RSS_GENEVE                 RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE              RTE_BIT64(21)
+#define ETH_RSS_NVGRE                  RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU               RTE_BIT64(23)
+#define ETH_RSS_GTPU                   RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH                RTE_BIT64(24)
+#define ETH_RSS_ETH                    RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN             RTE_BIT64(25)
+#define ETH_RSS_S_VLAN                 RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN             RTE_BIT64(26)
+#define ETH_RSS_C_VLAN                 RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP                RTE_BIT64(27)
+#define ETH_RSS_ESP                    RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH                 RTE_BIT64(28)
+#define ETH_RSS_AH                     RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3             RTE_BIT64(29)
+#define ETH_RSS_L2TPV3                 RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP               RTE_BIT64(30)
+#define ETH_RSS_PFCP                   RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE              RTE_BIT64(31)
+#define ETH_RSS_PPPOE                  RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI              RTE_BIT64(32)
+#define ETH_RSS_ECPRI                  RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS               RTE_BIT64(33)
+#define ETH_RSS_MPLS                   RTE_ETH_RSS_MPLS
+#define RTE_ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
+#define ETH_RSS_IPV4_CHKSUM            RTE_ETH_RSS_IPV4_CHKSUM
 
 /**
  * The ETH_RSS_L4_CHKSUM works on checksum field of any L4 header.
@@ -553,34 +630,41 @@ struct rte_eth_rss_conf {
  * checksum type for constructing the use of RSS offload bits.
  *
  * Due to above reason, some old APIs (and configuration) don't support
- * ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
+ * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
  *
 * For the case that checksum is not used in a UDP header,
  * it takes the reserved value 0 as input for the hash function.
  */
-#define ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
+#define RTE_ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
+#define ETH_RSS_L4_CHKSUM              RTE_ETH_RSS_L4_CHKSUM
 
 /*
- * We use the following macros to combine with above ETH_RSS_* for
+ * We use the following macros to combine with above RTE_ETH_RSS_* for
  * more specific input set selection. These bits are defined starting
  * from the high end of the 64 bits.
- * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
+ * Note: If the above RTE_ETH_RSS_* bits are used without SRC/DST_ONLY, then
  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
  * the same level are used simultaneously, it is the same case as none of
  * them are added.
  */
-#define ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
-#define ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
-#define ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
-#define ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
-#define ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
-#define ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
+#define RTE_ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
+#define ETH_RSS_L3_SRC_ONLY            RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
+#define ETH_RSS_L3_DST_ONLY            RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
+#define ETH_RSS_L4_SRC_ONLY            RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
+#define ETH_RSS_L4_DST_ONLY            RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
+#define ETH_RSS_L2_SRC_ONLY            RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
+#define ETH_RSS_L2_DST_ONLY            RTE_ETH_RSS_L2_DST_ONLY
 
 /*
  * Only select IPV6 address prefix as RSS input set according to
- * https://tools.ietf.org/html/rfc6052
- * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
- * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
+ * https://tools.ietf.org/html/rfc6052
+ * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
  */
 #define RTE_ETH_RSS_L3_PRE32	   RTE_BIT64(57)
 #define RTE_ETH_RSS_L3_PRE40	   RTE_BIT64(56)
@@ -602,22 +686,27 @@ struct rte_eth_rss_conf {
  * It basically stands for the innermost encapsulation level RSS
  * can be performed on according to PMD and device capabilities.
  */
-#define ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT	RTE_ETH_RSS_LEVEL_PMD_DEFAULT
 
 /**
  * level 1, requests RSS to be performed on the outermost packet
  * encapsulation level.
  */
-#define ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST	RTE_ETH_RSS_LEVEL_OUTERMOST
 
 /**
  * level 2, requests RSS to be performed on the specified inner packet
  * encapsulation level, from outermost to innermost (lower to higher values).
  */
-#define ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST	RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK	RTE_ETH_RSS_LEVEL_MASK
 
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf)	RTE_ETH_RSS_LEVEL(rss_hf)
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
@@ -632,219 +721,312 @@ struct rte_eth_rss_conf {
 static inline uint64_t
 rte_eth_rss_hf_refine(uint64_t rss_hf)
 {
-	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 
-	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 	return rss_hf;
 }
 
-#define ETH_RSS_IPV6_PRE32 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32	RTE_ETH_RSS_IPV6_PRE32
 
-#define ETH_RSS_IPV6_PRE40 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40	RTE_ETH_RSS_IPV6_PRE40
 
-#define ETH_RSS_IPV6_PRE48 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48	RTE_ETH_RSS_IPV6_PRE48
 
-#define ETH_RSS_IPV6_PRE56 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56	RTE_ETH_RSS_IPV6_PRE56
 
-#define ETH_RSS_IPV6_PRE64 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64	RTE_ETH_RSS_IPV6_PRE64
 
-#define ETH_RSS_IPV6_PRE96 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96	RTE_ETH_RSS_IPV6_PRE96
 
-#define ETH_RSS_IPV6_PRE32_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP	RTE_ETH_RSS_IPV6_PRE32_UDP
 
-#define ETH_RSS_IPV6_PRE40_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP	RTE_ETH_RSS_IPV6_PRE40_UDP
 
-#define ETH_RSS_IPV6_PRE48_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP	RTE_ETH_RSS_IPV6_PRE48_UDP
 
-#define ETH_RSS_IPV6_PRE56_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP	RTE_ETH_RSS_IPV6_PRE56_UDP
 
-#define ETH_RSS_IPV6_PRE64_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP	RTE_ETH_RSS_IPV6_PRE64_UDP
 
-#define ETH_RSS_IPV6_PRE96_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP	RTE_ETH_RSS_IPV6_PRE96_UDP
 
-#define ETH_RSS_IPV6_PRE32_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP	RTE_ETH_RSS_IPV6_PRE32_TCP
 
-#define ETH_RSS_IPV6_PRE40_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP	RTE_ETH_RSS_IPV6_PRE40_TCP
 
-#define ETH_RSS_IPV6_PRE48_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP	RTE_ETH_RSS_IPV6_PRE48_TCP
 
-#define ETH_RSS_IPV6_PRE56_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP	RTE_ETH_RSS_IPV6_PRE56_TCP
 
-#define ETH_RSS_IPV6_PRE64_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP	RTE_ETH_RSS_IPV6_PRE64_TCP
 
-#define ETH_RSS_IPV6_PRE96_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP	RTE_ETH_RSS_IPV6_PRE96_TCP
 
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP	RTE_ETH_RSS_IPV6_PRE32_SCTP
 
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP	RTE_ETH_RSS_IPV6_PRE40_SCTP
 
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP	RTE_ETH_RSS_IPV6_PRE48_SCTP
 
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP	RTE_ETH_RSS_IPV6_PRE56_SCTP
 
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP	RTE_ETH_RSS_IPV6_PRE64_SCTP
 
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
-	ETH_RSS_VXLAN  | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
-	ETH_RSS_S_VLAN  | \
-	ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP	RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP	RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP	RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP	RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP	RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+	RTE_ETH_RSS_VXLAN  | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL	RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+	RTE_ETH_RSS_S_VLAN  | \
+	RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN	RTE_ETH_RSS_VLAN
 
 /**< Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX | \
-	ETH_RSS_PORT  | \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE | \
-	ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX | \
+	RTE_ETH_RSS_PORT  | \
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE | \
+	RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK	RTE_ETH_RSS_PROTO_MASK
 
 /*
  * Definitions used for redirection table entry size.
  * Some RSS RETA sizes may not be supported by some drivers, check the
  * documentation or the description of relevant functions for more details.
  */
-#define ETH_RSS_RETA_SIZE_64  64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE   64
+#define RTE_ETH_RSS_RETA_SIZE_64  64
+#define ETH_RSS_RETA_SIZE_64	RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128	RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256	RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512	RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE   64
+#define RTE_RETA_GROUP_SIZE	RTE_ETH_RETA_GROUP_SIZE
 
 /**@{@name VMDq and DCB maximums */
-#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
-#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS	RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES	RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES	RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES	RTE_ETH_DCB_NUM_QUEUES
 /**@}*/
 
 /**@{@name DCB capabilities */
-#define ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PG_SUPPORT	RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT	RTE_ETH_DCB_PFC_SUPPORT
 /**@}*/
 
 /**@{@name VLAN offload bits */
-#define ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
-
-#define ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
-#define ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
-#define ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
-#define ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
-#define ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD	RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD	RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD	RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD	RTE_ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
+#define ETH_VLAN_STRIP_MASK	RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
+#define ETH_VLAN_FILTER_MASK	RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
+#define ETH_VLAN_EXTEND_MASK	RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
+#define ETH_QINQ_STRIP_MASK	RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define ETH_VLAN_ID_MAX		RTE_ETH_VLAN_ID_MAX
 /**@}*/
 
 /* Definitions used for receive MAC address   */
-#define ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define ETH_NUM_RECEIVE_MAC_ADDR	RTE_ETH_NUM_RECEIVE_MAC_ADDR
 
 /* Definitions used for unicast hash  */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY	RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
 
 /**@{@name VMDq Rx mode
  * @see rte_eth_vmdq_rx_conf.rx_mode
  */
-#define ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG	RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_MC	RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC	RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST	RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST	RTE_ETH_VMDQ_ACCEPT_MULTICAST
 /**@}*/
 
+/** Maximum nb. of VLANs per mirror rule */
+#define RTE_ETH_MIRROR_MAX_VLANS       64
+#define ETH_MIRROR_MAX_VLANS	RTE_ETH_MIRROR_MAX_VLANS
+
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP     0x01  /**< Virtual Pool uplink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_UP	RTE_ETH_MIRROR_VIRTUAL_POOL_UP
+#define RTE_ETH_MIRROR_UPLINK_PORT         0x02  /**< Uplink Port Mirroring. */
+#define ETH_MIRROR_UPLINK_PORT	RTE_ETH_MIRROR_UPLINK_PORT
+#define RTE_ETH_MIRROR_DOWNLINK_PORT       0x04  /**< Downlink Port Mirroring. */
+#define ETH_MIRROR_DOWNLINK_PORT	RTE_ETH_MIRROR_DOWNLINK_PORT
+#define RTE_ETH_MIRROR_VLAN                0x08  /**< VLAN Mirroring. */
+#define ETH_MIRROR_VLAN		RTE_ETH_MIRROR_VLAN
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN   0x10  /**< Virtual Pool downlink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_DOWN	RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
+
+/**
+ * A structure used to configure VLAN traffic mirror of an Ethernet port.
+ */
+struct rte_eth_vlan_mirror {
+	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
+	/** VLAN ID list for vlan mirroring. */
+	uint16_t vlan_id[RTE_ETH_MIRROR_MAX_VLANS];
+};
+
+/**
+ * A structure used to configure traffic mirror of an Ethernet port.
+ */
+struct rte_eth_mirror_conf {
+	uint8_t rule_type; /**< Mirroring rule type */
+	uint8_t dst_pool;  /**< Destination pool for this mirror rule. */
+	uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
+	/** VLAN ID setting for VLAN mirroring. */
+	struct rte_eth_vlan_mirror vlan;
+};
+
 /**
  * A structure used to configure 64 entries of Redirection Table of the
  * Receive Side Scaling (RSS) feature of an Ethernet port. To configure
@@ -854,7 +1036,7 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)
 struct rte_eth_rss_reta_entry64 {
 	uint64_t mask;
 	/**< Mask bits indicate which entries need to be updated/queried. */
-	uint16_t reta[RTE_RETA_GROUP_SIZE];
+	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
 	/**< Group of 64 redirection table entries. */
 };
 
@@ -863,38 +1045,44 @@ struct rte_eth_rss_reta_entry64 {
  * in DCB configurations
  */
 enum rte_eth_nb_tcs {
-	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
-	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
+	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
 };
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
 
 /**
  * This enum indicates the possible number of queue pools
  * in VMDQ configurations.
  */
 enum rte_eth_nb_pools {
-	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
-	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
-	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
-	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
+	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
+	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
+	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
+	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
 };
+#define ETH_8_POOLS	RTE_ETH_8_POOLS
+#define ETH_16_POOLS	RTE_ETH_16_POOLS
+#define ETH_32_POOLS	RTE_ETH_32_POOLS
+#define ETH_64_POOLS	RTE_ETH_64_POOLS
 
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_dcb_tx_conf {
 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_dcb_tx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_tx_conf {
@@ -920,8 +1108,8 @@ struct rte_eth_vmdq_dcb_conf {
 	struct {
 		uint16_t vlan_id; /**< The vlan id of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 	/**< Selects a queue in a pool */
 };
 
@@ -932,7 +1120,7 @@ struct rte_eth_vmdq_dcb_conf {
  * Using this feature, packets are routed to a pool of queues. By default,
  * the pool selection is based on the MAC address, the vlan id in the
  * vlan tag as specified in the pool_map array.
- * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
  * selection using only the MAC address. MAC address to pool mapping is done
  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
  * corresponding to the pool id.
@@ -953,7 +1141,7 @@ struct rte_eth_vmdq_rx_conf {
 	struct {
 		uint16_t vlan_id; /**< The vlan id of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
 };
 
 /**
@@ -962,7 +1150,7 @@ struct rte_eth_vmdq_rx_conf {
 struct rte_eth_txmode {
 	enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
 	/**
-	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -1046,7 +1234,7 @@ struct rte_eth_rxconf {
 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */
 	/**
-	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1075,7 +1263,7 @@ struct rte_eth_txconf {
 
 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	/**
-	 * Per-queue Tx offloads to be set  using DEV_TX_OFFLOAD_* flags.
+	 * Per-queue Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1186,12 +1374,17 @@ struct rte_eth_desc_lim {
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
-	RTE_FC_NONE = 0, /**< Disable flow control. */
-	RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
-	RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
-	RTE_FC_FULL      /**< Enable flow control on both side. */
+	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+	RTE_ETH_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
+	RTE_ETH_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
+	RTE_ETH_FC_FULL      /**< Enable flow control on both sides. */
 };
 
+#define RTE_FC_NONE	RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE	RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE	RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL	RTE_ETH_FC_FULL
+
 /**
  * A structure used to configure Ethernet flow control parameter.
  * These parameters will be configured into the register of the NIC.
@@ -1222,18 +1415,29 @@ struct rte_eth_pfc_conf {
  * @see rte_eth_udp_tunnel
  */
 enum rte_eth_tunnel_type {
-	RTE_TUNNEL_TYPE_NONE = 0,
-	RTE_TUNNEL_TYPE_VXLAN,
-	RTE_TUNNEL_TYPE_GENEVE,
-	RTE_TUNNEL_TYPE_TEREDO,
-	RTE_TUNNEL_TYPE_NVGRE,
-	RTE_TUNNEL_TYPE_IP_IN_GRE,
-	RTE_L2_TUNNEL_TYPE_E_TAG,
-	RTE_TUNNEL_TYPE_VXLAN_GPE,
-	RTE_TUNNEL_TYPE_ECPRI,
-	RTE_TUNNEL_TYPE_MAX,
+	RTE_ETH_TUNNEL_TYPE_NONE = 0,
+	RTE_ETH_TUNNEL_TYPE_VXLAN,
+	RTE_ETH_TUNNEL_TYPE_GENEVE,
+	RTE_ETH_TUNNEL_TYPE_TEREDO,
+	RTE_ETH_TUNNEL_TYPE_NVGRE,
+	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_ETH_TUNNEL_TYPE_ECPRI,
+	RTE_ETH_TUNNEL_TYPE_MAX,
 };
 
+#define RTE_TUNNEL_TYPE_NONE		RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN		RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE		RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO		RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE		RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG	RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI		RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX		RTE_ETH_TUNNEL_TYPE_MAX
+
 /* Deprecated API file for rte_eth_dev_filter_* functions */
 #include "rte_eth_ctrl.h"
 
@@ -1241,11 +1445,16 @@ enum rte_eth_tunnel_type {
  *  Memory space that can be configured to store Flow Director filters
  *  in the board memory.
  */
-enum rte_fdir_pballoc_type {
-	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
-	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
-	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+	RTE_ETH_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+	RTE_ETH_FDIR_PBALLOC_128K,     /**< 128k. */
+	RTE_ETH_FDIR_PBALLOC_256K,     /**< 256k. */
 };
+#define rte_fdir_pballoc_type	rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K	RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K	RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K	RTE_ETH_FDIR_PBALLOC_256K
 
 /**
  *  Select report mode of FDIR hash information in RX descriptors.
@@ -1262,9 +1471,9 @@ enum rte_fdir_status_mode {
  *
  * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
  */
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
 	enum rte_fdir_mode mode; /**< Flow Director mode. */
-	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+	enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
 	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
 	/** RX queue of packets matching a "drop" filter in perfect mode. */
 	uint8_t drop_queue;
@@ -1273,6 +1482,8 @@ struct rte_fdir_conf {
 	/**< Flex payload configuration. */
 };
 
+#define rte_fdir_conf rte_eth_fdir_conf
+
 /**
  * UDP tunneling configuration.
  *
@@ -1290,7 +1501,7 @@ struct rte_eth_udp_tunnel {
 /**
  * A structure used to enable/disable specific device interrupts.
  */
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint32_t lsc:1;
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
@@ -1299,18 +1510,20 @@ struct rte_intr_conf {
 	uint32_t rmv:1;
 };
 
+#define rte_intr_conf rte_eth_intr_conf
+
 /**
  * A structure used to configure an Ethernet port.
  * Depending upon the RX multi-queue mode, extra advanced
  * configuration settings may be needed.
  */
 struct rte_eth_conf {
-	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
-				used. ETH_LINK_SPEED_FIXED disables link
+	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
+				used. RTE_ETH_LINK_SPEED_FIXED disables link
 				autonegotiation, and a unique speed shall be
 				set. Otherwise, the bitmap defines the set of
 				speeds to be advertised. If the special value
-				ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
+				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
 				supported are advertised. */
 	struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
 	struct rte_eth_txmode txmode; /**< Port TX configuration. */
@@ -1336,48 +1549,70 @@ struct rte_eth_conf {
 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
 		/**< Port vmdq TX configuration. */
 	} tx_adv_conf; /**< Port TX DCB configuration (union). */
-	/** Currently,Priority Flow Control(PFC) are supported,if DCB with PFC
-	    is needed,and the variable must be set ETH_DCB_PFC_SUPPORT. */
+	/**
+	 * Currently, Priority Flow Control (PFC) is supported. If DCB with
+	 * PFC is needed, the variable must be set to RTE_ETH_DCB_PFC_SUPPORT.
+	 */
 	uint32_t dcb_capability_en;
-	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
-	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+	struct rte_eth_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
+	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
 };
 
 /**
  * RX offload capabilities of a device.
  */
-#define DEV_RX_OFFLOAD_VLAN_STRIP  0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT	0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER	0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND	0x00000400
-#define DEV_RX_OFFLOAD_SCATTER		0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP  0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP	RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM	RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO     0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO		RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP  0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP	RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP	RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT	0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT	RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER	0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER	RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND	0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER	0x00002000
+#define DEV_RX_OFFLOAD_SCATTER		RTE_ETH_RX_OFFLOAD_SCATTER
 /**
  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_RX_OFFLOAD_TIMESTAMP	0x00004000
-#define DEV_RX_OFFLOAD_SECURITY         0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC		0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM	0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH		0x00080000
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP	0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP	RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY     0x00008000
+#define DEV_RX_OFFLOAD_SECURITY		RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC	0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC		RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH	0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH	RTE_ETH_RX_OFFLOAD_RSS_HASH
 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
 
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				 DEV_RX_OFFLOAD_UDP_CKSUM | \
-				 DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			     DEV_RX_OFFLOAD_VLAN_FILTER | \
-			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-			     DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM	RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN	RTE_ETH_RX_OFFLOAD_VLAN
 
 /*
  * If new Rx offload capabilities are defined, they also must be
@@ -1387,52 +1622,74 @@ struct rte_eth_conf {
 /**
  * TX offload capabilities of a device.
  */
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM  0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO     0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO     0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
-#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT 0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT	RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM   0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM   0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM	RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO     0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO		RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO     0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO		RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT 0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT	RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT	RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
 /**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
  * tx queue without SW lock.
  */
-#define DEV_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS	RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 /**< Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 /**< Device supports optimization for fast release of mbufs.
 *   When set, the application must guarantee that per-queue all mbufs come
 *   from the same mempool and have refcnt = 1.
  */
-#define DEV_TX_OFFLOAD_SECURITY         0x00020000
+#define RTE_ETH_TX_OFFLOAD_SECURITY         0x00020000
+#define DEV_TX_OFFLOAD_SECURITY	RTE_ETH_TX_OFFLOAD_SECURITY
 /**
  * Device supports generic UDP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO	RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
 /**
  * Device supports generic IP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO	RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
 /** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
 /**
  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP	RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
@@ -1564,7 +1821,7 @@ struct rte_eth_dev_info {
 	uint16_t vmdq_pool_base;  /**< First ID of VMDQ pools. */
 	struct rte_eth_desc_lim rx_desc_lim;  /**< RX descriptors limits */
 	struct rte_eth_desc_lim tx_desc_lim;  /**< TX descriptors limits */
-	uint32_t speed_capa;  /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 	/** Configured number of rx/tx queues */
 	uint16_t nb_rx_queues; /**< Number of RX queues. */
 	uint16_t nb_tx_queues; /**< Number of TX queues. */
@@ -1668,8 +1925,10 @@ struct rte_eth_xstat_name {
 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
 };
 
-#define ETH_DCB_NUM_TCS    8
-#define ETH_MAX_VMDQ_POOL  64
+#define RTE_ETH_DCB_NUM_TCS    8
+#define ETH_DCB_NUM_TCS	RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL  64
+#define ETH_MAX_VMDQ_POOL	RTE_ETH_MAX_VMDQ_POOL
 
 /**
  * A structure used to get the information of queue and
@@ -1680,12 +1939,12 @@ struct rte_eth_dcb_tc_queue_mapping {
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 	/** rx queues assigned to tc per Pool */
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 };
 
 /**
@@ -1694,8 +1953,8 @@ struct rte_eth_dcb_tc_queue_mapping {
  */
 struct rte_eth_dcb_info {
 	uint8_t nb_tcs;        /**< number of TCs */
-	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
-	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
+	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
+	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
 	/** rx queues assigned to tc */
 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
 };
@@ -1719,7 +1978,7 @@ enum rte_eth_fec_mode {
 
 /* A structure used to get capabilities per link speed */
 struct rte_eth_fec_capa {
-	uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
+	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
 	uint32_t capa;  /**< FEC capabilities bitmask */
 };
 
@@ -1742,13 +2001,17 @@ struct rte_eth_fec_capa {
 
 /**@{@name L2 tunnel configuration */
 /**< l2 tunnel enable mask */
-#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define RTE_ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define ETH_L2_TUNNEL_ENABLE_MASK	RTE_ETH_L2_TUNNEL_ENABLE_MASK
 /**< l2 tunnel insertion mask */
-#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define RTE_ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define ETH_L2_TUNNEL_INSERTION_MASK	RTE_ETH_L2_TUNNEL_INSERTION_MASK
 /**< l2 tunnel stripping mask */
-#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define RTE_ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define ETH_L2_TUNNEL_STRIPPING_MASK	RTE_ETH_L2_TUNNEL_STRIPPING_MASK
 /**< l2 tunnel forwarding mask */
-#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define RTE_ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define ETH_L2_TUNNEL_FORWARDING_MASK	RTE_ETH_L2_TUNNEL_FORWARDING_MASK
 /**@}*/
 
 /**
@@ -2059,14 +2322,14 @@ uint16_t rte_eth_dev_count_total(void);
  * @param speed
  *   Numerical speed value in Mbps
  * @param duplex
- *   ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
+ *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
  * @return
  *   0 if the speed cannot be mapped
  */
 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 
 /**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2076,7 +2339,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
 
 /**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2170,7 +2433,7 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
- *   the DEV_RX_OFFLOAD_* flags.
+ *   the RTE_ETH_RX_OFFLOAD_* flags.
  *   If an offloading set in rx_conf->offloads
  *   hasn't been set in the input argument eth_conf->rxmode.offloads
  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
@@ -2747,7 +3010,7 @@ const char *rte_eth_link_speed_to_str(uint32_t link_speed);
  *
  * @param str
  *   A pointer to a string to be filled with textual representation of
- *   device status. At least ETH_LINK_MAX_STR_LEN bytes should be allocated to
+ *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
  *   store default link status text.
  * @param len
  *   Length of available memory at 'str' string.
@@ -3293,10 +3556,10 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
  *   The port identifier of the Ethernet device.
  * @param offload_mask
  *   The VLAN Offload bit mask can be mixed use with "OR"
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  * @return
  *   - (0) if successful.
  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
@@ -3312,10 +3575,10 @@ int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
  *   The port identifier of the Ethernet device.
  * @return
  *   - (>0) if successful. Bit mask to indicate
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  *   - (-ENODEV) if *port_id* invalid.
  */
 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
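
[Not part of the diff -- a minimal usage sketch of the renamed VLAN
offload bits with the get/set API documented above; 'port_id' is an
assumed, already-configured port:]

	#include <rte_ethdev.h>

	static int
	enable_vlan_strip(uint16_t port_id)
	{
		int mask = rte_eth_dev_get_vlan_offload(port_id);

		if (mask < 0)
			return mask; /* -ENODEV: invalid port */
		/* Keep the already-enabled bits, add stripping. */
		return rte_eth_dev_set_vlan_offload(port_id,
				mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
	}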
@@ -5340,7 +5603,7 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
  * of those packets whose transmission was effectively completed.
  *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
  * invoke this function concurrently on the same tx queue without SW lock.
  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
  *
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index 2b6efeef8cf5..555580ab4e71 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -2890,7 +2890,7 @@ struct rte_flow_action_rss {
 	 * through.
 	 */
 	uint32_t level;
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len; /**< Hash key length in bytes. */
 	uint32_t queue_num; /**< Number of entries in @p queue. */
 	const uint8_t *key; /**< Hash key. */
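
[Not part of the diff -- the rte_flow RSS action consumes the same
hash-type bits; a sketch with assumed queue numbers:]

	#include <rte_flow.h>

	static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
	static const struct rte_flow_action_rss rss_action = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0, /* PMD-default encapsulation level */
		.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			 RTE_ETH_RSS_L4_SRC_ONLY,
		.key_len = 0,
		.key = NULL,
		.queue_num = RTE_DIM(rss_queues),
		.queue = rss_queues,
	};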
diff --git a/lib/gso/rte_gso.c b/lib/gso/rte_gso.c
index 0d02ec3cee05..119fdcac0b7f 100644
--- a/lib/gso/rte_gso.c
+++ b/lib/gso/rte_gso.c
@@ -15,13 +15,13 @@
 #include "gso_udp4.h"
 
 #define ILLEGAL_UDP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+	((((ctx)->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0) || \
 	 (ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
 
 #define ILLEGAL_TCP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+	((((ctx)->gso_types & (RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
 		(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
 
 int
@@ -54,28 +54,28 @@ rte_gso_segment(struct rte_mbuf *pkt,
 	ol_flags = pkt->ol_flags;
 
 	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
 			((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
-			 (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+			 (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_tunnel_udp4_segment(pkt, gso_size,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_UDP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_udp4_segment(pkt, gso_size, direct_pool,
 				indirect_pool, pkts_out, nb_pkts_out);
diff --git a/lib/gso/rte_gso.h b/lib/gso/rte_gso.h
index d93ee8e5b171..0a65afc11e64 100644
--- a/lib/gso/rte_gso.h
+++ b/lib/gso/rte_gso.h
@@ -52,11 +52,11 @@ struct rte_gso_ctx {
 	uint32_t gso_types;
 	/**< the bit mask of required GSO types. The GSO library
 	 * uses the same macros as that of describing device TX
-	 * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+	 * offloading capabilities (i.e. RTE_ETH_TX_OFFLOAD_*_TSO) for
 	 * gso_types.
 	 *
 	 * For example, if applications want to segment TCP/IPv4
-	 * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+	 * packets, set RTE_ETH_TX_OFFLOAD_TCP_TSO in gso_types.
 	 */
 	uint16_t gso_size;
 	/**< maximum size of an output GSO segment, including packet
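
[Not part of the diff -- a sketch of a GSO context built with the
renamed Tx offload bits; the two mempool arguments are assumed to be
pre-created by the caller:]

	#include <rte_gso.h>

	static struct rte_gso_ctx
	make_gso_ctx(struct rte_mempool *direct_pool,
		     struct rte_mempool *indirect_pool)
	{
		struct rte_gso_ctx ctx = {
			.direct_pool = direct_pool,
			.indirect_pool = indirect_pool,
			.flag = 0,
			.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO,
			.gso_size = 1400, /* max output segment size, bytes */
		};
		return ctx;
	}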
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index d6f167994411..5a5b6b1e33c1 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -185,7 +185,7 @@ extern "C" {
  * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
  * HW capability, At minimum, the PMD should support
  * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
- * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ * if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
  */
 #define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))
 
@@ -208,7 +208,7 @@ extern "C" {
  * a) Fill outer_l2_len and outer_l3_len in mbuf.
  * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
  * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
- * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * 2) Configure RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
  */
 #define PKT_TX_OUTER_UDP_CKSUM     (1ULL << 41)
 
@@ -253,7 +253,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
  * or PKT_TX_TUNNEL_IPIP if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
@@ -266,7 +266,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
  * if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
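
For illustration, the PKT_TX_OUTER_UDP_CKSUM steps documented above map
onto code roughly as follows (a sketch; 'port_conf' and the mbuf 'm' are
assumed to exist):

	/* 2) Per-port: request the offload when configuring the device. */
	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

	/* 1) Per-packet: describe the outer headers on the mbuf. */
	m->outer_l2_len = sizeof(struct rte_ether_hdr);
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_UDP_CKSUM;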
diff --git a/lib/mbuf/rte_mbuf_dyn.h b/lib/mbuf/rte_mbuf_dyn.h
index fb03cf1dcf90..29abe8da53cf 100644
--- a/lib/mbuf/rte_mbuf_dyn.h
+++ b/lib/mbuf/rte_mbuf_dyn.h
@@ -37,7 +37,7 @@
  *   of the dynamic field to be registered:
  *   const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... };
  * - The application initializes the PMD, and asks for this feature
- *   at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in
+ *   at port initialization by passing RTE_ETH_RX_OFFLOAD_MY_FEATURE in
  *   rxconf. This will make the PMD to register the field by calling
  *   rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD
  *   stores the returned offset.
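
For illustration, a sketch of the registration flow described in the
comment above (RTE_ETH_RX_OFFLOAD_MY_FEATURE is the comment's own
hypothetical offload; the field name and parameters here are
illustrative):

	static const struct rte_mbuf_dynfield rte_dynfield_my_feature = {
		.name = "example_my_feature",	/* illustrative name */
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};

	/* Called by the PMD when the offload is requested in rxconf;
	 * a negative return means registration failed (rte_errno set). */
	int offset = rte_mbuf_dynfield_register(&rte_dynfield_my_feature);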
-- 
2.31.1



* [dpdk-dev] [PATCH v6] ethdev: add namespace
  2021-10-20 19:23       ` [dpdk-dev] [PATCH v5] " Ferruh Yigit
@ 2021-10-22  2:02         ` Ferruh Yigit
  2021-10-22  6:44           ` Andrew Rybchenko
                             ` (2 more replies)
  0 siblings, 3 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-10-22  2:02 UTC (permalink / raw)
  To: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Haiyue Wang,
	Beilei Xing, Matan Azrad, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Chandubabu Namburu, Rasesh Mody, Shahed Shaikh, Bruce Richardson,
	Konstantin Ananyev, Ruifeng Wang, Rahul Lakkireddy,
	Marcin Wojtas, Michal Krawczyk, Shai Brandes, Evgeny Schemeilin,
	Igor Chauskin, Gagandeep Singh, Gaetan Rivet, Ziyang Xuan,
	Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou, Jingjing Wu,
	Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang
  Cc: Ferruh Yigit, dev, Tyler Retzlaff, David Marchand

Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
way. The macros kept for backward compatibility can be removed in the
next LTS. Also updated some struct names to have the 'rte_eth' prefix.

All internal components have been switched to the new names.

Syntax fixed on the lines that this patch touches.
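
For illustration, the backward compatibility layer amounts to one-line
aliases from the old names to the new ones, along the lines of the
sketch below (a representative subset only; the authoritative list is
the one this patch adds to rte_ethdev.h):

	/* Old names kept as aliases; can be removed in a later LTS. */
	#define ETH_MQ_TX_NONE         RTE_ETH_MQ_TX_NONE
	#define ETH_RSS_IP             RTE_ETH_RSS_IP
	#define DEV_TX_OFFLOAD_TCP_TSO RTE_ETH_TX_OFFLOAD_TCP_TSO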

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Wisam Jaddo <wisamm@nvidia.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
Cc: David Marchand <david.marchand@redhat.com>
Cc: Thomas Monjalon <thomas@monjalon.net>

v2:
* Updated internal components
* Removed deprecation notice

v3:
* Updated missing macros / structs that David highlighted
* Added release notes update

v4:
* rebased on latest next-net
* depends on https://patches.dpdk.org/user/todo/dpdk/?series=19744
* Not able to complete the scripts to update user code, although some
  were shared by Aman:
  https://patches.dpdk.org/project/dpdk/patch/20211008102949.70716-1-aman.deep.singh@intel.com/
  Sending a new version as a possible option to get this patch into -rc1,
  with the scripts to follow later, before the release.

v5:
* rebased on latest next-net

v6:
* rebased on latest next-net
---
 app/proc-info/main.c                          |    8 +-
 app/test-eventdev/test_perf_common.c          |    4 +-
 app/test-eventdev/test_pipeline_common.c      |   10 +-
 app/test-flow-perf/config.h                   |    2 +-
 app/test-pipeline/init.c                      |    8 +-
 app/test-pmd/cmdline.c                        |  286 ++---
 app/test-pmd/config.c                         |  200 ++--
 app/test-pmd/csumonly.c                       |   28 +-
 app/test-pmd/flowgen.c                        |    6 +-
 app/test-pmd/macfwd.c                         |    6 +-
 app/test-pmd/macswap_common.h                 |    6 +-
 app/test-pmd/parameters.c                     |   54 +-
 app/test-pmd/testpmd.c                        |   52 +-
 app/test-pmd/testpmd.h                        |    2 +-
 app/test-pmd/txonly.c                         |    6 +-
 app/test/test_ethdev_link.c                   |   68 +-
 app/test/test_event_eth_rx_adapter.c          |    4 +-
 app/test/test_kni.c                           |    2 +-
 app/test/test_link_bonding.c                  |    4 +-
 app/test/test_link_bonding_mode4.c            |    4 +-
 app/test/test_link_bonding_rssconf.c          |   28 +-
 app/test/test_pmd_perf.c                      |   12 +-
 app/test/virtual_pmd.c                        |   10 +-
 doc/guides/eventdevs/cnxk.rst                 |    2 +-
 doc/guides/eventdevs/octeontx2.rst            |    2 +-
 doc/guides/nics/af_packet.rst                 |    2 +-
 doc/guides/nics/bnxt.rst                      |   24 +-
 doc/guides/nics/enic.rst                      |    2 +-
 doc/guides/nics/features.rst                  |  114 +-
 doc/guides/nics/fm10k.rst                     |    6 +-
 doc/guides/nics/intel_vf.rst                  |   10 +-
 doc/guides/nics/ixgbe.rst                     |   12 +-
 doc/guides/nics/mlx5.rst                      |    4 +-
 doc/guides/nics/tap.rst                       |    2 +-
 .../generic_segmentation_offload_lib.rst      |    8 +-
 doc/guides/prog_guide/mbuf_lib.rst            |   18 +-
 doc/guides/prog_guide/poll_mode_drv.rst       |    8 +-
 doc/guides/prog_guide/rte_flow.rst            |   34 +-
 doc/guides/prog_guide/rte_security.rst        |    2 +-
 doc/guides/rel_notes/deprecation.rst          |   10 +-
 doc/guides/rel_notes/release_21_11.rst        |    3 +
 doc/guides/sample_app_ug/ipsec_secgw.rst      |    4 +-
 doc/guides/testpmd_app_ug/run_app.rst         |    2 +-
 drivers/bus/dpaa/include/process.h            |   16 +-
 drivers/common/cnxk/roc_npc.h                 |    2 +-
 drivers/net/af_packet/rte_eth_af_packet.c     |   20 +-
 drivers/net/af_xdp/rte_eth_af_xdp.c           |   12 +-
 drivers/net/ark/ark_ethdev.c                  |   16 +-
 drivers/net/atlantic/atl_ethdev.c             |   88 +-
 drivers/net/atlantic/atl_ethdev.h             |   18 +-
 drivers/net/atlantic/atl_rxtx.c               |    6 +-
 drivers/net/avp/avp_ethdev.c                  |   26 +-
 drivers/net/axgbe/axgbe_dev.c                 |    6 +-
 drivers/net/axgbe/axgbe_ethdev.c              |  104 +-
 drivers/net/axgbe/axgbe_ethdev.h              |   12 +-
 drivers/net/axgbe/axgbe_mdio.c                |    2 +-
 drivers/net/axgbe/axgbe_rxtx.c                |    6 +-
 drivers/net/bnx2x/bnx2x_ethdev.c              |   12 +-
 drivers/net/bnxt/bnxt.h                       |   62 +-
 drivers/net/bnxt/bnxt_ethdev.c                |  172 +--
 drivers/net/bnxt/bnxt_flow.c                  |    6 +-
 drivers/net/bnxt/bnxt_hwrm.c                  |  112 +-
 drivers/net/bnxt/bnxt_reps.c                  |    2 +-
 drivers/net/bnxt/bnxt_ring.c                  |    4 +-
 drivers/net/bnxt/bnxt_rxq.c                   |   28 +-
 drivers/net/bnxt/bnxt_rxr.c                   |    4 +-
 drivers/net/bnxt/bnxt_rxtx_vec_avx2.c         |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_common.h       |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c         |    2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c          |    2 +-
 drivers/net/bnxt/bnxt_txr.c                   |    4 +-
 drivers/net/bnxt/bnxt_vnic.c                  |   30 +-
 drivers/net/bnxt/rte_pmd_bnxt.c               |    8 +-
 drivers/net/bonding/eth_bond_private.h        |    4 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c     |   16 +-
 drivers/net/bonding/rte_eth_bond_api.c        |    6 +-
 drivers/net/bonding/rte_eth_bond_pmd.c        |   50 +-
 drivers/net/cnxk/cn10k_ethdev.c               |   42 +-
 drivers/net/cnxk/cn10k_rte_flow.c             |    2 +-
 drivers/net/cnxk/cn10k_rx.c                   |    4 +-
 drivers/net/cnxk/cn10k_tx.c                   |    4 +-
 drivers/net/cnxk/cn9k_ethdev.c                |   60 +-
 drivers/net/cnxk/cn9k_rx.c                    |    4 +-
 drivers/net/cnxk/cn9k_tx.c                    |    4 +-
 drivers/net/cnxk/cnxk_ethdev.c                |  112 +-
 drivers/net/cnxk/cnxk_ethdev.h                |   49 +-
 drivers/net/cnxk/cnxk_ethdev_devargs.c        |    6 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c            |  106 +-
 drivers/net/cnxk/cnxk_link.c                  |   14 +-
 drivers/net/cnxk/cnxk_ptp.c                   |    4 +-
 drivers/net/cnxk/cnxk_rte_flow.c              |    2 +-
 drivers/net/cxgbe/cxgbe.h                     |   46 +-
 drivers/net/cxgbe/cxgbe_ethdev.c              |   42 +-
 drivers/net/cxgbe/cxgbe_main.c                |   12 +-
 drivers/net/dpaa/dpaa_ethdev.c                |  180 +--
 drivers/net/dpaa/dpaa_ethdev.h                |   10 +-
 drivers/net/dpaa/dpaa_flow.c                  |   32 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |   47 +-
 drivers/net/dpaa2/dpaa2_ethdev.c              |  138 +--
 drivers/net/dpaa2/dpaa2_ethdev.h              |   22 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |    8 +-
 drivers/net/e1000/e1000_ethdev.h              |   18 +-
 drivers/net/e1000/em_ethdev.c                 |   64 +-
 drivers/net/e1000/em_rxtx.c                   |   38 +-
 drivers/net/e1000/igb_ethdev.c                |  158 +--
 drivers/net/e1000/igb_pf.c                    |    2 +-
 drivers/net/e1000/igb_rxtx.c                  |  116 +-
 drivers/net/ena/ena_ethdev.c                  |   70 +-
 drivers/net/ena/ena_ethdev.h                  |    4 +-
 drivers/net/ena/ena_rss.c                     |   74 +-
 drivers/net/enetc/enetc_ethdev.c              |   30 +-
 drivers/net/enic/enic.h                       |    2 +-
 drivers/net/enic/enic_ethdev.c                |   88 +-
 drivers/net/enic/enic_main.c                  |   40 +-
 drivers/net/enic/enic_res.c                   |   50 +-
 drivers/net/failsafe/failsafe.c               |    8 +-
 drivers/net/failsafe/failsafe_intr.c          |    4 +-
 drivers/net/failsafe/failsafe_ops.c           |   78 +-
 drivers/net/fm10k/fm10k.h                     |    4 +-
 drivers/net/fm10k/fm10k_ethdev.c              |  146 +--
 drivers/net/fm10k/fm10k_rxtx_vec.c            |    6 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c      |   22 +-
 drivers/net/hinic/hinic_pmd_ethdev.c          |  136 +--
 drivers/net/hinic/hinic_pmd_rx.c              |   36 +-
 drivers/net/hinic/hinic_pmd_rx.h              |   22 +-
 drivers/net/hns3/hns3_dcb.c                   |   14 +-
 drivers/net/hns3/hns3_ethdev.c                |  352 +++---
 drivers/net/hns3/hns3_ethdev.h                |   12 +-
 drivers/net/hns3/hns3_ethdev_vf.c             |  100 +-
 drivers/net/hns3/hns3_flow.c                  |    6 +-
 drivers/net/hns3/hns3_ptp.c                   |    2 +-
 drivers/net/hns3/hns3_rss.c                   |  108 +-
 drivers/net/hns3/hns3_rss.h                   |   28 +-
 drivers/net/hns3/hns3_rxtx.c                  |   30 +-
 drivers/net/hns3/hns3_rxtx.h                  |    2 +-
 drivers/net/hns3/hns3_rxtx_vec.c              |   10 +-
 drivers/net/i40e/i40e_ethdev.c                |  272 ++---
 drivers/net/i40e/i40e_ethdev.h                |   24 +-
 drivers/net/i40e/i40e_flow.c                  |   32 +-
 drivers/net/i40e/i40e_hash.c                  |  158 +--
 drivers/net/i40e/i40e_pf.c                    |   14 +-
 drivers/net/i40e/i40e_rxtx.c                  |    8 +-
 drivers/net/i40e/i40e_rxtx.h                  |    4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c       |    2 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h       |    8 +-
 drivers/net/i40e/i40e_vf_representor.c        |   48 +-
 drivers/net/iavf/iavf.h                       |   24 +-
 drivers/net/iavf/iavf_ethdev.c                |  178 +--
 drivers/net/iavf/iavf_hash.c                  |  320 ++---
 drivers/net/iavf/iavf_rxtx.c                  |    2 +-
 drivers/net/iavf/iavf_rxtx.h                  |   24 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c         |    4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c       |    6 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |    2 +-
 drivers/net/ice/ice_dcf.c                     |    2 +-
 drivers/net/ice/ice_dcf_ethdev.c              |   86 +-
 drivers/net/ice/ice_dcf_vf_representor.c      |   56 +-
 drivers/net/ice/ice_ethdev.c                  |  180 +--
 drivers/net/ice/ice_ethdev.h                  |   26 +-
 drivers/net/ice/ice_hash.c                    |  290 ++---
 drivers/net/ice/ice_rxtx.c                    |   16 +-
 drivers/net/ice/ice_rxtx_vec_avx2.c           |    2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c         |    4 +-
 drivers/net/ice/ice_rxtx_vec_common.h         |   28 +-
 drivers/net/ice/ice_rxtx_vec_sse.c            |    2 +-
 drivers/net/igc/igc_ethdev.c                  |  138 +--
 drivers/net/igc/igc_ethdev.h                  |   54 +-
 drivers/net/igc/igc_txrx.c                    |   48 +-
 drivers/net/ionic/ionic_ethdev.c              |  138 +--
 drivers/net/ionic/ionic_ethdev.h              |   12 +-
 drivers/net/ionic/ionic_lif.c                 |   36 +-
 drivers/net/ionic/ionic_rxtx.c                |   10 +-
 drivers/net/ipn3ke/ipn3ke_representor.c       |   64 +-
 drivers/net/ixgbe/ixgbe_ethdev.c              |  285 +++--
 drivers/net/ixgbe/ixgbe_ethdev.h              |   18 +-
 drivers/net/ixgbe/ixgbe_fdir.c                |   24 +-
 drivers/net/ixgbe/ixgbe_flow.c                |    2 +-
 drivers/net/ixgbe/ixgbe_ipsec.c               |   12 +-
 drivers/net/ixgbe/ixgbe_pf.c                  |   34 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                |  249 ++--
 drivers/net/ixgbe/ixgbe_rxtx.h                |    4 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |    2 +-
 drivers/net/ixgbe/ixgbe_tm.c                  |   16 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c      |   16 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.c             |   14 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.h             |    4 +-
 drivers/net/kni/rte_eth_kni.c                 |    8 +-
 drivers/net/liquidio/lio_ethdev.c             |  114 +-
 drivers/net/memif/memif_socket.c              |    2 +-
 drivers/net/memif/rte_eth_memif.c             |   16 +-
 drivers/net/mlx4/mlx4_ethdev.c                |   32 +-
 drivers/net/mlx4/mlx4_flow.c                  |   30 +-
 drivers/net/mlx4/mlx4_intr.c                  |    8 +-
 drivers/net/mlx4/mlx4_rxq.c                   |   18 +-
 drivers/net/mlx4/mlx4_txq.c                   |   24 +-
 drivers/net/mlx5/linux/mlx5_ethdev_os.c       |   54 +-
 drivers/net/mlx5/linux/mlx5_os.c              |    6 +-
 drivers/net/mlx5/mlx5.c                       |    4 +-
 drivers/net/mlx5/mlx5.h                       |    2 +-
 drivers/net/mlx5/mlx5_defs.h                  |    6 +-
 drivers/net/mlx5/mlx5_ethdev.c                |    6 +-
 drivers/net/mlx5/mlx5_flow.c                  |   54 +-
 drivers/net/mlx5/mlx5_flow.h                  |   12 +-
 drivers/net/mlx5/mlx5_flow_dv.c               |   44 +-
 drivers/net/mlx5/mlx5_flow_verbs.c            |    4 +-
 drivers/net/mlx5/mlx5_rss.c                   |   10 +-
 drivers/net/mlx5/mlx5_rxq.c                   |   40 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h              |    8 +-
 drivers/net/mlx5/mlx5_tx.c                    |   30 +-
 drivers/net/mlx5/mlx5_txq.c                   |   58 +-
 drivers/net/mlx5/mlx5_vlan.c                  |    4 +-
 drivers/net/mlx5/windows/mlx5_os.c            |    4 +-
 drivers/net/mvneta/mvneta_ethdev.c            |   32 +-
 drivers/net/mvneta/mvneta_ethdev.h            |   10 +-
 drivers/net/mvneta/mvneta_rxtx.c              |    2 +-
 drivers/net/mvpp2/mrvl_ethdev.c               |  112 +-
 drivers/net/netvsc/hn_ethdev.c                |   70 +-
 drivers/net/netvsc/hn_rndis.c                 |   50 +-
 drivers/net/nfb/nfb_ethdev.c                  |   20 +-
 drivers/net/nfb/nfb_rx.c                      |    2 +-
 drivers/net/nfp/nfp_common.c                  |  122 +-
 drivers/net/nfp/nfp_ethdev.c                  |    2 +-
 drivers/net/nfp/nfp_ethdev_vf.c               |    2 +-
 drivers/net/ngbe/ngbe_ethdev.c                |   50 +-
 drivers/net/null/rte_eth_null.c               |   28 +-
 drivers/net/octeontx/octeontx_ethdev.c        |   74 +-
 drivers/net/octeontx/octeontx_ethdev.h        |   30 +-
 drivers/net/octeontx/octeontx_ethdev_ops.c    |   26 +-
 drivers/net/octeontx2/otx2_ethdev.c           |   96 +-
 drivers/net/octeontx2/otx2_ethdev.h           |   64 +-
 drivers/net/octeontx2/otx2_ethdev_devargs.c   |   12 +-
 drivers/net/octeontx2/otx2_ethdev_ops.c       |   14 +-
 drivers/net/octeontx2/otx2_ethdev_sec.c       |    8 +-
 drivers/net/octeontx2/otx2_flow.c             |    2 +-
 drivers/net/octeontx2/otx2_flow_ctrl.c        |   36 +-
 drivers/net/octeontx2/otx2_flow_parse.c       |    4 +-
 drivers/net/octeontx2/otx2_link.c             |   40 +-
 drivers/net/octeontx2/otx2_mcast.c            |    2 +-
 drivers/net/octeontx2/otx2_ptp.c              |    4 +-
 drivers/net/octeontx2/otx2_rss.c              |   70 +-
 drivers/net/octeontx2/otx2_rx.c               |    4 +-
 drivers/net/octeontx2/otx2_tx.c               |    2 +-
 drivers/net/octeontx2/otx2_vlan.c             |   42 +-
 drivers/net/octeontx_ep/otx_ep_ethdev.c       |    6 +-
 drivers/net/octeontx_ep/otx_ep_rxtx.c         |    6 +-
 drivers/net/pcap/pcap_ethdev.c                |   12 +-
 drivers/net/pfe/pfe_ethdev.c                  |   18 +-
 drivers/net/qede/base/mcp_public.h            |    4 +-
 drivers/net/qede/qede_ethdev.c                |  156 +--
 drivers/net/qede/qede_filter.c                |   42 +-
 drivers/net/qede/qede_rxtx.c                  |    2 +-
 drivers/net/qede/qede_rxtx.h                  |   16 +-
 drivers/net/ring/rte_eth_ring.c               |   20 +-
 drivers/net/sfc/sfc.c                         |   30 +-
 drivers/net/sfc/sfc_ef100_rx.c                |   10 +-
 drivers/net/sfc/sfc_ef100_tx.c                |   20 +-
 drivers/net/sfc/sfc_ef10_essb_rx.c            |    4 +-
 drivers/net/sfc/sfc_ef10_rx.c                 |    8 +-
 drivers/net/sfc/sfc_ef10_tx.c                 |   32 +-
 drivers/net/sfc/sfc_ethdev.c                  |   50 +-
 drivers/net/sfc/sfc_flow.c                    |    2 +-
 drivers/net/sfc/sfc_port.c                    |   52 +-
 drivers/net/sfc/sfc_repr.c                    |   10 +-
 drivers/net/sfc/sfc_rx.c                      |   50 +-
 drivers/net/sfc/sfc_tx.c                      |   50 +-
 drivers/net/softnic/rte_eth_softnic.c         |   12 +-
 drivers/net/szedata2/rte_eth_szedata2.c       |   14 +-
 drivers/net/tap/rte_eth_tap.c                 |  104 +-
 drivers/net/tap/tap_rss.h                     |    2 +-
 drivers/net/thunderx/nicvf_ethdev.c           |  102 +-
 drivers/net/thunderx/nicvf_ethdev.h           |   40 +-
 drivers/net/txgbe/txgbe_ethdev.c              |  242 ++--
 drivers/net/txgbe/txgbe_ethdev.h              |   18 +-
 drivers/net/txgbe/txgbe_ethdev_vf.c           |   24 +-
 drivers/net/txgbe/txgbe_fdir.c                |   20 +-
 drivers/net/txgbe/txgbe_flow.c                |    2 +-
 drivers/net/txgbe/txgbe_ipsec.c               |   12 +-
 drivers/net/txgbe/txgbe_pf.c                  |   34 +-
 drivers/net/txgbe/txgbe_rxtx.c                |  308 ++---
 drivers/net/txgbe/txgbe_rxtx.h                |    4 +-
 drivers/net/txgbe/txgbe_tm.c                  |   16 +-
 drivers/net/vhost/rte_eth_vhost.c             |   16 +-
 drivers/net/virtio/virtio_ethdev.c            |  124 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c          |   72 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h          |   16 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c            |   16 +-
 examples/bbdev_app/main.c                     |    6 +-
 examples/bond/main.c                          |   14 +-
 examples/distributor/main.c                   |   12 +-
 examples/ethtool/ethtool-app/main.c           |    2 +-
 examples/ethtool/lib/rte_ethtool.c            |   18 +-
 .../pipeline_worker_generic.c                 |   16 +-
 .../eventdev_pipeline/pipeline_worker_tx.c    |   12 +-
 examples/flow_classify/flow_classify.c        |    4 +-
 examples/flow_filtering/main.c                |   16 +-
 examples/ioat/ioatfwd.c                       |    8 +-
 examples/ip_fragmentation/main.c              |   12 +-
 examples/ip_pipeline/link.c                   |   20 +-
 examples/ip_reassembly/main.c                 |   18 +-
 examples/ipsec-secgw/ipsec-secgw.c            |   32 +-
 examples/ipsec-secgw/sa.c                     |    8 +-
 examples/ipv4_multicast/main.c                |    6 +-
 examples/kni/main.c                           |    8 +-
 examples/l2fwd-crypto/main.c                  |   10 +-
 examples/l2fwd-event/l2fwd_common.c           |   10 +-
 examples/l2fwd-event/main.c                   |    2 +-
 examples/l2fwd-jobstats/main.c                |    8 +-
 examples/l2fwd-keepalive/main.c               |    8 +-
 examples/l2fwd/main.c                         |    8 +-
 examples/l3fwd-acl/main.c                     |   18 +-
 examples/l3fwd-graph/main.c                   |   14 +-
 examples/l3fwd-power/main.c                   |   16 +-
 examples/l3fwd/l3fwd_event.c                  |    4 +-
 examples/l3fwd/main.c                         |   18 +-
 examples/link_status_interrupt/main.c         |   10 +-
 .../client_server_mp/mp_server/init.c         |    4 +-
 examples/multi_process/symmetric_mp/main.c    |   14 +-
 examples/ntb/ntb_fwd.c                        |    6 +-
 examples/packet_ordering/main.c               |    4 +-
 .../performance-thread/l3fwd-thread/main.c    |   16 +-
 examples/pipeline/obj.c                       |   20 +-
 examples/ptpclient/ptpclient.c                |   10 +-
 examples/qos_meter/main.c                     |   16 +-
 examples/qos_sched/init.c                     |    6 +-
 examples/rxtx_callbacks/main.c                |    8 +-
 examples/server_node_efd/server/init.c        |    8 +-
 examples/skeleton/basicfwd.c                  |    4 +-
 examples/vhost/main.c                         |   26 +-
 examples/vm_power_manager/main.c              |    6 +-
 examples/vmdq/main.c                          |   20 +-
 examples/vmdq_dcb/main.c                      |   40 +-
 lib/ethdev/ethdev_driver.h                    |   36 +-
 lib/ethdev/rte_ethdev.c                       |  181 ++-
 lib/ethdev/rte_ethdev.h                       | 1035 +++++++++++------
 lib/ethdev/rte_flow.h                         |    2 +-
 lib/gso/rte_gso.c                             |   20 +-
 lib/gso/rte_gso.h                             |    4 +-
 lib/mbuf/rte_mbuf_core.h                      |    8 +-
 lib/mbuf/rte_mbuf_dyn.h                       |    2 +-
 339 files changed, 6645 insertions(+), 6390 deletions(-)

diff --git a/app/proc-info/main.c b/app/proc-info/main.c
index bfe5ce825b70..a4271047e693 100644
--- a/app/proc-info/main.c
+++ b/app/proc-info/main.c
@@ -757,11 +757,11 @@ show_port(void)
 		}
 
 		ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
-		if (ret == 0 && fc_conf.mode != RTE_FC_NONE)  {
+		if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE)  {
 			printf("\t  -- flow control mode %s%s high %u low %u pause %u%s%s\n",
-			       fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
-			       fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
-			       fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+			       fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+			       fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+			       fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
 			       fc_conf.autoneg ? " auto" : "",
 			       fc_conf.high_water,
 			       fc_conf.low_water,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 660d5a0364b6..31d1b0e14653 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -668,13 +668,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct test_perf *t = evt_test_priv(test);
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.split_hdr_size = 0,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 2775e72c580d..d202091077a6 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
@@ -223,7 +223,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 			local_port_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_RSS_HASH;
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 		ret = rte_eth_dev_info_get(i, &dev_info);
 		if (ret != 0) {
@@ -233,9 +233,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 
 		/* Enable mbuf fast free if PMD has the capability. */
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h
index a14d4e05e185..4249b6175b82 100644
--- a/app/test-flow-perf/config.h
+++ b/app/test-flow-perf/config.h
@@ -5,7 +5,7 @@
 #define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
 
 /* Configuration */
 #define RXQ_NUM 4
diff --git a/app/test-pipeline/init.c b/app/test-pipeline/init.c
index fe37d63730c6..c73801904103 100644
--- a/app/test-pipeline/init.c
+++ b/app/test-pipeline/init.c
@@ -70,16 +70,16 @@ struct app_params app = {
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -178,7 +178,7 @@ app_ports_check_link(void)
 		RTE_LOG(INFO, USER1, "Port %u %s\n",
 			port,
 			link_status_text);
-		if (link.link_status == ETH_LINK_DOWN)
+		if (link.link_status == RTE_ETH_LINK_DOWN)
 			all_ports_up = 0;
 	}
 
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 3221f6e1aa40..ebea13f86ab0 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1478,51 +1478,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
 	int duplex;
 
 	if (!strcmp(duplexstr, "half")) {
-		duplex = ETH_LINK_HALF_DUPLEX;
+		duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	} else if (!strcmp(duplexstr, "full")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else if (!strcmp(duplexstr, "auto")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		fprintf(stderr, "Unknown duplex parameter\n");
 		return -1;
 	}
 
 	if (!strcmp(speedstr, "10")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
 	} else if (!strcmp(speedstr, "100")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
 	} else {
-		if (duplex != ETH_LINK_FULL_DUPLEX) {
+		if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
 			fprintf(stderr, "Invalid speed/duplex parameters\n");
 			return -1;
 		}
 		if (!strcmp(speedstr, "1000")) {
-			*speed = ETH_LINK_SPEED_1G;
+			*speed = RTE_ETH_LINK_SPEED_1G;
 		} else if (!strcmp(speedstr, "10000")) {
-			*speed = ETH_LINK_SPEED_10G;
+			*speed = RTE_ETH_LINK_SPEED_10G;
 		} else if (!strcmp(speedstr, "25000")) {
-			*speed = ETH_LINK_SPEED_25G;
+			*speed = RTE_ETH_LINK_SPEED_25G;
 		} else if (!strcmp(speedstr, "40000")) {
-			*speed = ETH_LINK_SPEED_40G;
+			*speed = RTE_ETH_LINK_SPEED_40G;
 		} else if (!strcmp(speedstr, "50000")) {
-			*speed = ETH_LINK_SPEED_50G;
+			*speed = RTE_ETH_LINK_SPEED_50G;
 		} else if (!strcmp(speedstr, "100000")) {
-			*speed = ETH_LINK_SPEED_100G;
+			*speed = RTE_ETH_LINK_SPEED_100G;
 		} else if (!strcmp(speedstr, "200000")) {
-			*speed = ETH_LINK_SPEED_200G;
+			*speed = RTE_ETH_LINK_SPEED_200G;
 		} else if (!strcmp(speedstr, "auto")) {
-			*speed = ETH_LINK_SPEED_AUTONEG;
+			*speed = RTE_ETH_LINK_SPEED_AUTONEG;
 		} else {
 			fprintf(stderr, "Unknown speed parameter\n");
 			return -1;
 		}
 	}
 
-	if (*speed != ETH_LINK_SPEED_AUTONEG)
-		*speed |= ETH_LINK_SPEED_FIXED;
+	if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+		*speed |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return 0;
 }
@@ -2166,33 +2166,33 @@ cmd_config_rss_parsed(void *parsed_result,
 	int ret;
 
 	if (!strcmp(res->value, "all"))
-		rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
-			ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
-			ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
-			ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
-			ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+			RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+			RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+			RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+			RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "eth"))
-		rss_conf.rss_hf = ETH_RSS_ETH;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH;
 	else if (!strcmp(res->value, "vlan"))
-		rss_conf.rss_hf = ETH_RSS_VLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
 	else if (!strcmp(res->value, "ip"))
-		rss_conf.rss_hf = ETH_RSS_IP;
+		rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	else if (!strcmp(res->value, "udp"))
-		rss_conf.rss_hf = ETH_RSS_UDP;
+		rss_conf.rss_hf = RTE_ETH_RSS_UDP;
 	else if (!strcmp(res->value, "tcp"))
-		rss_conf.rss_hf = ETH_RSS_TCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_TCP;
 	else if (!strcmp(res->value, "sctp"))
-		rss_conf.rss_hf = ETH_RSS_SCTP;
+		rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
 	else if (!strcmp(res->value, "ether"))
-		rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
 	else if (!strcmp(res->value, "port"))
-		rss_conf.rss_hf = ETH_RSS_PORT;
+		rss_conf.rss_hf = RTE_ETH_RSS_PORT;
 	else if (!strcmp(res->value, "vxlan"))
-		rss_conf.rss_hf = ETH_RSS_VXLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
 	else if (!strcmp(res->value, "geneve"))
-		rss_conf.rss_hf = ETH_RSS_GENEVE;
+		rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
 	else if (!strcmp(res->value, "nvgre"))
-		rss_conf.rss_hf = ETH_RSS_NVGRE;
+		rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
 	else if (!strcmp(res->value, "l3-pre32"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
 	else if (!strcmp(res->value, "l3-pre40"))
@@ -2206,46 +2206,46 @@ cmd_config_rss_parsed(void *parsed_result,
 	else if (!strcmp(res->value, "l3-pre96"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
 	else if (!strcmp(res->value, "l3-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
 	else if (!strcmp(res->value, "l3-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
 	else if (!strcmp(res->value, "l4-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
 	else if (!strcmp(res->value, "l2-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
 	else if (!strcmp(res->value, "l2-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
 	else if (!strcmp(res->value, "l2tpv3"))
-		rss_conf.rss_hf = ETH_RSS_L2TPV3;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
 	else if (!strcmp(res->value, "esp"))
-		rss_conf.rss_hf = ETH_RSS_ESP;
+		rss_conf.rss_hf = RTE_ETH_RSS_ESP;
 	else if (!strcmp(res->value, "ah"))
-		rss_conf.rss_hf = ETH_RSS_AH;
+		rss_conf.rss_hf = RTE_ETH_RSS_AH;
 	else if (!strcmp(res->value, "pfcp"))
-		rss_conf.rss_hf = ETH_RSS_PFCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
 	else if (!strcmp(res->value, "pppoe"))
-		rss_conf.rss_hf = ETH_RSS_PPPOE;
+		rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
 	else if (!strcmp(res->value, "gtpu"))
-		rss_conf.rss_hf = ETH_RSS_GTPU;
+		rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
 	else if (!strcmp(res->value, "ecpri"))
-		rss_conf.rss_hf = ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "mpls"))
-		rss_conf.rss_hf = ETH_RSS_MPLS;
+		rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
 	else if (!strcmp(res->value, "ipv4-chksum"))
-		rss_conf.rss_hf = ETH_RSS_IPV4_CHKSUM;
+		rss_conf.rss_hf = RTE_ETH_RSS_IPV4_CHKSUM;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "level-default")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
 	} else if (!strcmp(res->value, "level-outer")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
 	} else if (!strcmp(res->value, "level-inner")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
 	} else if (!strcmp(res->value, "default"))
 		use_default = 1;
 	else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@@ -2982,8 +2982,8 @@ parse_reta_config(const char *str,
 			return -1;
 		}
 
-		idx = hash_index / RTE_RETA_GROUP_SIZE;
-		shift = hash_index % RTE_RETA_GROUP_SIZE;
+		idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
+		shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[idx].mask |= (1ULL << shift);
 		reta_conf[idx].reta[shift] = nb_queue;
 	}
@@ -3012,10 +3012,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
 	} else
 		printf("The reta size of port %d is %u\n",
 			res->port_id, dev_info.reta_size);
-	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+	if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		fprintf(stderr,
 			"Currently do not support more than %u entries of redirection table\n",
-			ETH_RSS_RETA_SIZE_512);
+			RTE_ETH_RSS_RETA_SIZE_512);
 		return;
 	}
 
@@ -3086,8 +3086,8 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
 	char *end;
 	char *str_fld[8];
 	uint16_t i;
-	uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 	int ret;
 
 	p = strchr(p0, '(');
@@ -3132,7 +3132,7 @@ cmd_showport_reta_parsed(void *parsed_result,
 	if (ret != 0)
 		return;
 
-	max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+	max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
 	if (res->size == 0 || res->size > max_reta_size) {
 		fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
 			res->size, max_reta_size);
@@ -3272,7 +3272,7 @@ cmd_config_dcb_parsed(void *parsed_result,
 		return;
 	}
 
-	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+	if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
 		fprintf(stderr,
 			"The invalid number of traffic class, only 4 or 8 allowed.\n");
 		return;
@@ -4276,9 +4276,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
 	enum rte_vlan_type vlan_type;
 
 	if (!strcmp(res->vlan_type, "inner"))
-		vlan_type = ETH_VLAN_TYPE_INNER;
+		vlan_type = RTE_ETH_VLAN_TYPE_INNER;
 	else if (!strcmp(res->vlan_type, "outer"))
-		vlan_type = ETH_VLAN_TYPE_OUTER;
+		vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
 	else {
 		fprintf(stderr, "Unknown vlan type\n");
 		return;
@@ -4615,55 +4615,55 @@ csum_show(int port_id)
 	printf("Parse tunnel is %s\n",
 		(ports[port_id].parse_tunnel) ? "on" : "off");
 	printf("IP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
 	printf("UDP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
 	printf("TCP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
 	printf("SCTP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
 	printf("Outer-Ip checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
 	printf("Outer-Udp checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
 
 	/* display warnings if configuration is not supported by the NIC */
 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
 	if (ret != 0)
 		return;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware UDP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware TCP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 			== 0) {
 		fprintf(stderr,
 			"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@@ -4713,8 +4713,8 @@ cmd_csum_parsed(void *parsed_result,
 
 		if (!strcmp(res->proto, "ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_IPV4_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"IP checksum offload is not supported by port %u\n",
@@ -4722,8 +4722,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_UDP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"UDP checksum offload is not supported by port %u\n",
@@ -4731,8 +4731,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "tcp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_TCP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"TCP checksum offload is not supported by port %u\n",
@@ -4740,8 +4740,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "sctp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_SCTP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"SCTP checksum offload is not supported by port %u\n",
@@ -4749,9 +4749,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer IP checksum offload is not supported by port %u\n",
@@ -4759,9 +4759,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer UDP checksum offload is not supported by port %u\n",
@@ -4916,7 +4916,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr, "Error: TSO is not supported by port %d\n",
 			res->port_id);
 		return;
@@ -4924,11 +4924,11 @@ cmd_tso_set_parsed(void *parsed_result,
 
 	if (ports[res->port_id].tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_TCP_TSO;
+						~RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO for non-tunneled packets is disabled\n");
 	} else {
 		ports[res->port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_TCP_TSO;
+						RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO segment size for non-tunneled packets is %d\n",
 			ports[res->port_id].tso_segsz);
 	}
@@ -4940,7 +4940,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr,
 			"Warning: TSO enabled but not supported by port %d\n",
 			res->port_id);
@@ -5011,27 +5011,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
 	if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
 		return dev_info;
 
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
 		fprintf(stderr,
 			"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
@@ -5059,20 +5059,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 	dev_info = check_tunnel_tso_nic_support(res->port_id);
 	if (ports[res->port_id].tunnel_tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-			~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IP_TNL_TSO |
-			  DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 		printf("TSO for tunneled packets is disabled\n");
 	} else {
-		uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-					 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IP_TNL_TSO |
-					 DEV_TX_OFFLOAD_UDP_TNL_TSO);
+		uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 
 		ports[res->port_id].dev_conf.txmode.offloads |=
 			(tso_offloads & dev_info.tx_offload_capa);
@@ -5095,7 +5095,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 			fprintf(stderr,
 				"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
 		if (!(ports[res->port_id].dev_conf.txmode.offloads &
-		      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+		      RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 			fprintf(stderr,
 				"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
 	}
@@ -7227,9 +7227,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
 		return;
 	}
 
-	if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		rx_fc_en = true;
-	if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		tx_fc_en = true;
 
 	printf("\n%s Flow control infos for port %-2d %s\n",
@@ -7507,12 +7507,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
-			{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	/* Partial command line, retrieve current configuration */
@@ -7525,11 +7525,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 			return;
 		}
 
-		if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			rx_fc_en = 1;
-		if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			tx_fc_en = 1;
 	}
 
@@ -7597,12 +7597,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
-		{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+		{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@@ -9250,13 +9250,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
 	int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
 	if (!strcmp(res->what,"rxmode")) {
 		if (!strcmp(res->mode, "AUPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
 		else if (!strcmp(res->mode, "ROPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
 		else if (!strcmp(res->mode, "BAM"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
 		else if (!strncmp(res->mode, "MPE",3))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 	}
 
 	RTE_SET_USED(is_on);
@@ -9656,7 +9656,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
 	int ret;
 
 	tunnel_udp.udp_port = res->udp_port;
-	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+	tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 
 	if (!strcmp(res->what, "add"))
 		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9722,13 +9722,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
 	tunnel_udp.udp_port = res->udp_port;
 
 	if (!strcmp(res->tunnel_type, "vxlan")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 	} else if (!strcmp(res->tunnel_type, "geneve")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
 	} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
 	} else if (!strcmp(res->tunnel_type, "ecpri")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
 	} else {
 		fprintf(stderr, "Invalid tunnel type\n");
 		return;
@@ -11859,7 +11859,7 @@ cmd_set_macsec_offload_on_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
 #endif
@@ -11870,7 +11870,7 @@ cmd_set_macsec_offload_on_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MACSEC_INSERT;
+						RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
@@ -11956,7 +11956,7 @@ cmd_set_macsec_offload_off_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_disable(port_id);
 #endif
@@ -11964,7 +11964,7 @@ cmd_set_macsec_offload_off_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_MACSEC_INSERT;
+						~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index cad78350dcc9..a18871d461c4 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -86,62 +86,62 @@ static const struct {
 };
 
 const struct rss_type_info rss_type_table[] = {
-	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
-		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
-		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
-		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
 	{ "none", 0 },
-	{ "eth", ETH_RSS_ETH },
-	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
-	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
-	{ "vlan", ETH_RSS_VLAN },
-	{ "s-vlan", ETH_RSS_S_VLAN },
-	{ "c-vlan", ETH_RSS_C_VLAN },
-	{ "ipv4", ETH_RSS_IPV4 },
-	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
-	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
-	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
-	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
-	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
-	{ "ipv6", ETH_RSS_IPV6 },
-	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
-	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
-	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
-	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
-	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
-	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
-	{ "ipv6-ex", ETH_RSS_IPV6_EX },
-	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
-	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
-	{ "port", ETH_RSS_PORT },
-	{ "vxlan", ETH_RSS_VXLAN },
-	{ "geneve", ETH_RSS_GENEVE },
-	{ "nvgre", ETH_RSS_NVGRE },
-	{ "ip", ETH_RSS_IP },
-	{ "udp", ETH_RSS_UDP },
-	{ "tcp", ETH_RSS_TCP },
-	{ "sctp", ETH_RSS_SCTP },
-	{ "tunnel", ETH_RSS_TUNNEL },
+	{ "eth", RTE_ETH_RSS_ETH },
+	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+	{ "vlan", RTE_ETH_RSS_VLAN },
+	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
+	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
+	{ "ipv4", RTE_ETH_RSS_IPV4 },
+	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", RTE_ETH_RSS_IPV6 },
+	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+	{ "port", RTE_ETH_RSS_PORT },
+	{ "vxlan", RTE_ETH_RSS_VXLAN },
+	{ "geneve", RTE_ETH_RSS_GENEVE },
+	{ "nvgre", RTE_ETH_RSS_NVGRE },
+	{ "ip", RTE_ETH_RSS_IP },
+	{ "udp", RTE_ETH_RSS_UDP },
+	{ "tcp", RTE_ETH_RSS_TCP },
+	{ "sctp", RTE_ETH_RSS_SCTP },
+	{ "tunnel", RTE_ETH_RSS_TUNNEL },
 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
-	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
-	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
-	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
-	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
-	{ "esp", ETH_RSS_ESP },
-	{ "ah", ETH_RSS_AH },
-	{ "l2tpv3", ETH_RSS_L2TPV3 },
-	{ "pfcp", ETH_RSS_PFCP },
-	{ "pppoe", ETH_RSS_PPPOE },
-	{ "gtpu", ETH_RSS_GTPU },
-	{ "ecpri", ETH_RSS_ECPRI },
-	{ "mpls", ETH_RSS_MPLS },
-	{ "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
-	{ "l4-chksum", ETH_RSS_L4_CHKSUM },
+	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+	{ "esp", RTE_ETH_RSS_ESP },
+	{ "ah", RTE_ETH_RSS_AH },
+	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+	{ "pfcp", RTE_ETH_RSS_PFCP },
+	{ "pppoe", RTE_ETH_RSS_PPPOE },
+	{ "gtpu", RTE_ETH_RSS_GTPU },
+	{ "ecpri", RTE_ETH_RSS_ECPRI },
+	{ "mpls", RTE_ETH_RSS_MPLS },
+	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
 	{ NULL, 0 },
 };
 
@@ -538,39 +538,39 @@ static void
 device_infos_display_speeds(uint32_t speed_capa)
 {
 	printf("\n\tDevice speed capability:");
-	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
 		printf(" Autonegotiate (all speeds)");
-	if (speed_capa & ETH_LINK_SPEED_FIXED)
+	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
 		printf(" Disable autonegotiate (fixed speed)  ");
-	if (speed_capa & ETH_LINK_SPEED_10M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
 		printf(" 10 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_10M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
 		printf(" 10 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
 		printf(" 100 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
 		printf(" 100 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_1G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
 		printf(" 1 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_2_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
 		printf(" 2.5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
 		printf(" 5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_10G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
 		printf(" 10 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_20G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
 		printf(" 20 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_25G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
 		printf(" 25 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_40G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
 		printf(" 40 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_50G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
 		printf(" 50 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_56G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
 		printf(" 56 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_100G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
 		printf(" 100 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_200G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
 		printf(" 200 Gbps  ");
 }
 
@@ -723,9 +723,9 @@ port_infos_display(portid_t port_id)
 
 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
-	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 	       ("full-duplex") : ("half-duplex"));
-	printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 	       ("On") : ("Off"));
 
 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
@@ -743,22 +743,22 @@ port_infos_display(portid_t port_id)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 	if (vlan_offload >= 0){
 		printf("VLAN offload: \n");
-		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
 			printf("  strip on, ");
 		else
 			printf("  strip off, ");
 
-		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
 			printf("filter on, ");
 		else
 			printf("filter off, ");
 
-		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
 			printf("extend on, ");
 		else
 			printf("extend off, ");
 
-		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
 			printf("qinq strip on\n");
 		else
 			printf("qinq strip off\n");
@@ -2953,8 +2953,8 @@ port_rss_reta_info(portid_t port_id,
 	}
 
 	for (i = 0; i < nb_entries; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
@@ -3427,7 +3427,7 @@ dcb_fwd_config_setup(void)
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
 		fwd_lcores[lc_id]->stream_nb = 0;
 		fwd_lcores[lc_id]->stream_idx = sm_id;
-		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
 			/* if the nb_queue is zero, means this tc is
 			 * not enabled on the POOL
 			 */
@@ -4490,11 +4490,11 @@ vlan_extend_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	} else {
-		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4520,11 +4520,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4565,11 +4565,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	} else {
-		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4595,11 +4595,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	} else {
-		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4669,7 +4669,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 		return;
 
 	if (ports[port_id].dev_conf.txmode.offloads &
-	    DEV_TX_OFFLOAD_QINQ_INSERT) {
+	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
 		return;
 	}
@@ -4678,7 +4678,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: vlan insert is not supported by port %d\n",
 			port_id);
@@ -4686,7 +4686,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	ports[port_id].tx_vlan_id = vlan_id;
 }
 
@@ -4705,7 +4705,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: qinq insert not supported by port %d\n",
 			port_id);
@@ -4713,8 +4713,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
-						    DEV_TX_OFFLOAD_QINQ_INSERT);
+	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = vlan_id;
 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
 }
@@ -4723,8 +4723,8 @@ void
 tx_vlan_reset(portid_t port_id)
 {
 	ports[port_id].dev_conf.txmode.offloads &=
-				~(DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_QINQ_INSERT);
+				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = 0;
 	ports[port_id].tx_vlan_id_outer = 0;
 }
@@ -5130,7 +5130,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
 	ret = eth_link_get_nowait_print_err(port_id, &link);
 	if (ret < 0)
 		return 1;
-	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
 	    rate > link.link_speed) {
 		fprintf(stderr,
 			"Invalid rate value:%u bigger than link speed: %u\n",
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 090797318a35..75b24487e72e 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
-			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 				ol_flags |= PKT_TX_IP_CKSUM;
 			} else {
 				ipv4_hdr->hdr_checksum = 0;
@@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
-			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 				ol_flags |= PKT_TX_UDP_CKSUM;
 			} else {
 				udp_hdr->dgram_cksum = 0;
@@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		if (tso_segsz)
 			ol_flags |= PKT_TX_TCP_SEG;
-		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 			ol_flags |= PKT_TX_TCP_CKSUM;
 		} else {
 			tcp_hdr->cksum = 0;
@@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 			((char *)l3_hdr + info->l3_len);
 		/* sctp payload must be a multiple of 4 to be
 		 * offloaded */
-		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
 			((ipv4_hdr->total_length & 0x3) == 0)) {
 			ol_flags |= PKT_TX_SCTP_CKSUM;
 		} else {
@@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ipv4_hdr->hdr_checksum = 0;
 		ol_flags |= PKT_TX_OUTER_IPV4;
 
-		if (tx_offloads	& DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
 		else
 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ol_flags |= PKT_TX_TCP_SEG;
 
 	/* Skip SW outer UDP checksum generation if HW supports it */
-	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
 		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
 			udp_hdr->dgram_cksum
 				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				m->outer_l2_len = info.outer_l2_len;
 				m->outer_l3_len = info.outer_l3_len;
 				m->l2_len = info.l2_len;
@@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					rte_be_to_cpu_16(info.outer_ethertype),
 					info.outer_l3_len);
 			/* dump tx packet info */
-			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					    DEV_TX_OFFLOAD_UDP_CKSUM |
-					    DEV_TX_OFFLOAD_TCP_CKSUM |
-					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
 				info.tso_segsz != 0)
 				printf("tx: m->l2_len=%d m->l3_len=%d "
 					"m->l4_len=%d\n",
 					m->l2_len, m->l3_len, m->l4_len);
 			if (info.is_tunnel == 1) {
 				if ((tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 				    (tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
 				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
 					printf("tx: m->outer_l2_len=%d "
 						"m->outer_l3_len=%d\n",
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 7ebed9fed334..03d026dec169 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -99,11 +99,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
 
 	tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags |= PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads	& DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index ee76df7f0323..57e00bca20e7 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 	fs->rx_packets += nb_rx;
 	txp = &ports[fs->tx_port];
 	tx_offloads = txp->dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
diff --git a/app/test-pmd/macswap_common.h b/app/test-pmd/macswap_common.h
index 7e9a3590a436..7ade9a686b7c 100644
--- a/app/test-pmd/macswap_common.h
+++ b/app/test-pmd/macswap_common.h
@@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
 {
 	uint64_t ol_flags = 0;
 
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
 			PKT_TX_VLAN : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
 			PKT_TX_QINQ : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
 			PKT_TX_MACSEC : 0;
 
 	return ol_flags;
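
A usage sketch for the helper above, with hypothetical caller context (fs and
ports as elsewhere in testpmd):

	uint64_t tx_offload = ports[fs->tx_port].dev_conf.txmode.offloads;
	uint64_t ol_flags = ol_flags_init(tx_offload);
	/* ol_flags now carries PKT_TX_VLAN/PKT_TX_QINQ/PKT_TX_MACSEC as enabled */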
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index afc75f6bd213..cb40917077ea 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -547,29 +547,29 @@ parse_xstats_list(const char *in_str, struct rte_eth_xstat_name **xstats,
 static int
 parse_link_speed(int n)
 {
-	uint32_t speed = ETH_LINK_SPEED_FIXED;
+	uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
 
 	switch (n) {
 	case 1000:
-		speed |= ETH_LINK_SPEED_1G;
+		speed |= RTE_ETH_LINK_SPEED_1G;
 		break;
 	case 10000:
-		speed |= ETH_LINK_SPEED_10G;
+		speed |= RTE_ETH_LINK_SPEED_10G;
 		break;
 	case 25000:
-		speed |= ETH_LINK_SPEED_25G;
+		speed |= RTE_ETH_LINK_SPEED_25G;
 		break;
 	case 40000:
-		speed |= ETH_LINK_SPEED_40G;
+		speed |= RTE_ETH_LINK_SPEED_40G;
 		break;
 	case 50000:
-		speed |= ETH_LINK_SPEED_50G;
+		speed |= RTE_ETH_LINK_SPEED_50G;
 		break;
 	case 100000:
-		speed |= ETH_LINK_SPEED_100G;
+		speed |= RTE_ETH_LINK_SPEED_100G;
 		break;
 	case 200000:
-		speed |= ETH_LINK_SPEED_200G;
+		speed |= RTE_ETH_LINK_SPEED_200G;
 		break;
 	case 100:
 	case 10:
@@ -1002,13 +1002,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
 				if (!strcmp(optarg, "64K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_64K;
+						RTE_ETH_FDIR_PBALLOC_64K;
 				else if (!strcmp(optarg, "128K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_128K;
+						RTE_ETH_FDIR_PBALLOC_128K;
 				else if (!strcmp(optarg, "256K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_256K;
+						RTE_ETH_FDIR_PBALLOC_256K;
 				else
 					rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
 						 " must be: 64K or 128K or 256K\n",
@@ -1050,34 +1050,34 @@ launch_args_parse(int argc, char** argv)
 			}
 #endif
 			if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 			if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
-				rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 			if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
-				rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 			if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
-				rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-rx-timestamp"))
-				rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 			if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-filter"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-extend"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-qinq-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
 				rx_drop_en = 1;
@@ -1099,13 +1099,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
 				set_pkt_forwarding_mode(optarg);
 			if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
-				rss_hf = ETH_RSS_IP;
+				rss_hf = RTE_ETH_RSS_IP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
-				rss_hf = ETH_RSS_UDP;
+				rss_hf = RTE_ETH_RSS_UDP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
-				rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
-				rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rxq")) {
 				n = atoi(optarg);
 				if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@@ -1495,12 +1495,12 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
 				char *end = NULL;
 				n = strtoul(optarg, &end, 16);
-				if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+				if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
 					rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
 				else
 					rte_exit(EXIT_FAILURE,
 						 "rx-mq-mode must be >= 0 and <= %d\n",
-						 ETH_MQ_RX_VMDQ_DCB_RSS);
+						 RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
 			}
 			if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
 				record_core_cycles = 1;
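
Note that the speed values built above are OR-combinable capability bits, not
plain Mbps numbers; a hedged sketch of what parse_link_speed(10000) produces:

	uint32_t link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
	/* typically placed in dev_conf.link_speeds before rte_eth_dev_configure() */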
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 6d5bbc82404e..abfa8395ccdc 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -349,7 +349,7 @@ uint64_t noisy_lkup_num_reads_writes;
 /*
  * Receive Side Scaling (RSS) configuration.
  */
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
 
 /*
  * Port topology configuration
@@ -460,12 +460,12 @@ lcoreid_t latencystats_lcore_id = -1;
 struct rte_eth_rxmode rx_mode;
 
 struct rte_eth_txmode tx_mode = {
-	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 };
 
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
 	.mode = RTE_FDIR_MODE_NONE,
-	.pballoc = RTE_FDIR_PBALLOC_64K,
+	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 	.status = RTE_FDIR_REPORT_STATUS,
 	.mask = {
 		.vlan_tci_mask = 0xFFEF,
@@ -524,7 +524,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 /*
  * hexadecimal bitmask of RX mq mode can be enabled.
  */
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
 
 /*
  * Used to set forced link speed
@@ -1578,9 +1578,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
 	if (ret != 0)
 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
 
-	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		port->dev_conf.txmode.offloads &=
-			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Apply Rx offloads configuration */
 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
@@ -1717,8 +1717,8 @@ init_config(void)
 
 	init_port_config();
 
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
 	/*
 	 * Records which Mbuf pool to use by each logical core, if needed.
 	 */
@@ -3466,7 +3466,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3769,17 +3769,17 @@ init_port_config(void)
 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
 				port->dev_conf.rxmode.mq_mode =
 					(enum rte_eth_rx_mq_mode)
-						(rx_mq_mode & ETH_MQ_RX_RSS);
+						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
 			} else {
-				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 				port->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 				for (i = 0;
 				     i < port->dev_info.nb_rx_queues;
 				     i++)
 					port->rx_conf[i].offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 			}
 		}
 
@@ -3867,9 +3867,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		vmdq_rx_conf->enable_default_pool = 0;
 		vmdq_rx_conf->default_pool = 0;
 		vmdq_rx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 		vmdq_tx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 
 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3877,7 +3877,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 			vmdq_rx_conf->pool_map[i].pools =
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
@@ -3885,8 +3885,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3902,23 +3902,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
 	}
 
 	if (pfc_en)
 		eth_conf->dcb_capability_en =
-				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
 	else
-		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
 
 	return 0;
 }
@@ -3947,7 +3947,7 @@ init_port_dcb_config(portid_t pid,
 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	/* re-configure the device . */
 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@@ -3997,7 +3997,7 @@ init_port_dcb_config(portid_t pid,
 
 	rxtx_port_config(pid);
 	/* VLAN filter */
-	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index bf3669134aa0..cd1e623ad67a 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -493,7 +493,7 @@ extern lcoreid_t bitrate_lcore_id;
 extern uint8_t bitrate_enabled;
 #endif
 
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
 
 extern uint32_t max_rx_pkt_len;
 
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e45f8840c91c..9eb7992815e8 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -354,11 +354,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	tx_offloads = txp->dev_conf.txmode.offloads;
 	vlan_tci = txp->tx_vlan_id;
 	vlan_tci_outer = txp->tx_vlan_id_outer;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	/*
diff --git a/app/test/test_ethdev_link.c b/app/test/test_ethdev_link.c
index ee11987bae28..6248aea49abd 100644
--- a/app/test/test_ethdev_link.c
+++ b/app/test/test_ethdev_link.c
@@ -14,10 +14,10 @@ test_link_status_up_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -27,9 +27,9 @@ test_link_status_up_default(void)
 	TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
 		text, strlen(text), "Invalid default link status string");
 
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_FIXED;
-	link_status.link_speed = ETH_SPEED_NUM_10M,
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #2: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -37,7 +37,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -45,7 +45,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_NONE;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -54,9 +54,9 @@ test_link_status_up_default(void)
 		"string with HDX");
 
 	/* test max str len */
-	link_status.link_speed = ETH_SPEED_NUM_200G;
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_AUTONEG;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #4:len = %d, %s\n", ret, text);
 	RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@@ -69,10 +69,10 @@ test_link_status_down_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -90,9 +90,9 @@ test_link_status_invalid(void)
 	int ret = 0;
 	struct rte_eth_link link_status = {
 		.link_speed = 55555,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -116,21 +116,21 @@ test_link_speed_all_values(void)
 		const char *value;
 		uint32_t link_speed;
 	} speed_str_map[] = {
-		{ "None",   ETH_SPEED_NUM_NONE },
-		{ "10 Mbps",  ETH_SPEED_NUM_10M },
-		{ "100 Mbps", ETH_SPEED_NUM_100M },
-		{ "1 Gbps",   ETH_SPEED_NUM_1G },
-		{ "2.5 Gbps", ETH_SPEED_NUM_2_5G },
-		{ "5 Gbps",   ETH_SPEED_NUM_5G },
-		{ "10 Gbps",  ETH_SPEED_NUM_10G },
-		{ "20 Gbps",  ETH_SPEED_NUM_20G },
-		{ "25 Gbps",  ETH_SPEED_NUM_25G },
-		{ "40 Gbps",  ETH_SPEED_NUM_40G },
-		{ "50 Gbps",  ETH_SPEED_NUM_50G },
-		{ "56 Gbps",  ETH_SPEED_NUM_56G },
-		{ "100 Gbps", ETH_SPEED_NUM_100G },
-		{ "200 Gbps", ETH_SPEED_NUM_200G },
-		{ "Unknown",  ETH_SPEED_NUM_UNKNOWN },
+		{ "None",   RTE_ETH_SPEED_NUM_NONE },
+		{ "10 Mbps",  RTE_ETH_SPEED_NUM_10M },
+		{ "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+		{ "1 Gbps",   RTE_ETH_SPEED_NUM_1G },
+		{ "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+		{ "5 Gbps",   RTE_ETH_SPEED_NUM_5G },
+		{ "10 Gbps",  RTE_ETH_SPEED_NUM_10G },
+		{ "20 Gbps",  RTE_ETH_SPEED_NUM_20G },
+		{ "25 Gbps",  RTE_ETH_SPEED_NUM_25G },
+		{ "40 Gbps",  RTE_ETH_SPEED_NUM_40G },
+		{ "50 Gbps",  RTE_ETH_SPEED_NUM_50G },
+		{ "56 Gbps",  RTE_ETH_SPEED_NUM_56G },
+		{ "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+		{ "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+		{ "Unknown",  RTE_ETH_SPEED_NUM_UNKNOWN },
 		{ "Invalid",   50505 }
 	};
 
diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index add4d8a67821..a09253e91814 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -103,7 +103,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 		.intr_conf = {
 			.rxq = 1,
@@ -118,7 +118,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 	};
 
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index 96733554b6c4..40ab0d5c4ca4 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
 
 static const struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 5388d18125a6..8a9ef851789f 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -134,11 +134,11 @@ static uint16_t vlan_id = 0x100;
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 189d2430f27e..351129de2f9b 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -107,11 +107,11 @@ static struct link_bonding_unittest_params test_params  = {
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index e7bb0497b663..f9eae9397386 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -52,7 +52,7 @@ struct slave_conf {
 
 	struct rte_eth_rss_conf rss_conf;
 	uint8_t rss_key[40];
-	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t is_slave;
 	struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
@@ -61,7 +61,7 @@ struct slave_conf {
 struct link_bonding_rssconf_unittest_params {
 	uint8_t bond_port_id;
 	struct rte_eth_dev_info bond_dev_info;
-	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 	struct slave_conf slave_ports[SLAVE_COUNT];
 
 	struct rte_mempool *mbuf_pool;
@@ -80,27 +80,27 @@ static struct link_bonding_rssconf_unittest_params test_params  = {
  */
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
 static struct rte_eth_conf rss_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IPV6,
+			.rss_hf = RTE_ETH_RSS_IPV6,
 		},
 	},
 	.lpbk_mode = 0,
@@ -207,13 +207,13 @@ bond_slaves(void)
 static int
 reta_set(uint16_t port_id, uint8_t value, int reta_size)
 {
-	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
 	int i, j;
 
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
 		/* select all fields to set */
 		reta_conf[i].mask = ~0LL;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			reta_conf[i].reta[j] = value;
 	}
 
@@ -232,8 +232,8 @@ reta_check_synced(struct slave_conf *port)
 	for (i = 0; i < test_params.bond_dev_info.reta_size;
 			i++) {
 
-		int index = i / RTE_RETA_GROUP_SIZE;
-		int shift = i % RTE_RETA_GROUP_SIZE;
+		int index = i / RTE_ETH_RETA_GROUP_SIZE;
+		int shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (port->reta_conf[index].reta[shift] !=
 				test_params.bond_reta_conf[index].reta[shift])
@@ -251,7 +251,7 @@ static int
 bond_reta_fetch(void) {
 	unsigned j;
 
-	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
+	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
 			j++)
 		test_params.bond_reta_conf[j].mask = ~0LL;
 
@@ -268,7 +268,7 @@ static int
 slave_reta_fetch(struct slave_conf *port) {
 	unsigned j;
 
-	for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
+	for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
 		port->reta_conf[j].mask = ~0LL;
 
 	TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
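
The index arithmetic used throughout this file, in isolation (entry i of a
RETA grouped 64 entries per rte_eth_rss_reta_entry64; queue is hypothetical):

	uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;	/* which 64-entry group */
	uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;	/* slot inside the group */

	reta_conf[idx].mask |= 1ULL << shift;		/* mark the entry valid */
	reta_conf[idx].reta[shift] = queue;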
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index a3b4f52c65e6..1df86ce080e5 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -62,11 +62,11 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 1,  /* enable loopback */
 };
@@ -155,7 +155,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -822,7 +822,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		/* bulk alloc rx, full-featured tx */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "hybrid")) {
 		/* bulk alloc rx, vector tx
@@ -831,13 +831,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		 */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "full")) {
 		/* full feature rx,tx pair */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		return 0;
 	}
 
diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7e15b47eb0fb..d9f2e4f66bde 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -53,7 +53,7 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
 		rte_pktmbuf_free(pkt);
@@ -168,7 +168,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
 		int wait_to_complete __rte_unused)
 {
 	if (!bonded_eth_dev->data->dev_started)
-		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -562,9 +562,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 	eth_dev->data->nb_rx_queues = (uint16_t)1;
 	eth_dev->data->nb_tx_queues = (uint16_t)1;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL)
diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 53560d3830d7..1c0ea988f239 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue configuration.
 - HW managed event vectorization on CN10K for packets enqueued from ethdev to
diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index 11fbebfcd243..0fa57abfa3e0 100644
--- a/doc/guides/eventdevs/octeontx2.rst
+++ b/doc/guides/eventdevs/octeontx2.rst
@@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue config.
 
diff --git a/doc/guides/nics/af_packet.rst b/doc/guides/nics/af_packet.rst
index bdd6e7263c85..54feffdef4bd 100644
--- a/doc/guides/nics/af_packet.rst
+++ b/doc/guides/nics/af_packet.rst
@@ -70,5 +70,5 @@ Features and Limitations
 ------------------------
 
 The PMD will re-insert the VLAN tag transparently to the packet if the kernel
-strips it, as long as the ``DEV_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
+strips it, as long as ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
 application.
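
In application terms, the condition above amounts to leaving the strip bit
cleared; a one-line sketch:

	port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;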
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index aa6032889a55..b3d10f30dc77 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -877,21 +877,21 @@ processing. This improved performance is derived from a number of optimizations:
     * TX: only the following reduced set of transmit offloads is supported in
       vector mode::
 
-       DEV_TX_OFFLOAD_MBUF_FAST_FREE
+       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 
     * RX: only the following reduced set of receive offloads is supported in
       vector mode (note that jumbo MTU is allowed only when the MTU setting
-      does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
-       DEV_RX_OFFLOAD_VLAN_STRIP
-       DEV_RX_OFFLOAD_KEEP_CRC
-       DEV_RX_OFFLOAD_IPV4_CKSUM
-       DEV_RX_OFFLOAD_UDP_CKSUM
-       DEV_RX_OFFLOAD_TCP_CKSUM
-       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-       DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-       DEV_RX_OFFLOAD_RSS_HASH
-       DEV_RX_OFFLOAD_VLAN_FILTER
+      does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+       RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+       RTE_ETH_RX_OFFLOAD_KEEP_CRC
+       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_RSS_HASH
+       RTE_ETH_RX_OFFLOAD_VLAN_FILTER
 
 The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
 vector processing is made at run-time when the port is started; if no transmit
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 91bdcd065a95..0209730b904a 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -432,7 +432,7 @@ Limitations
 .. code-block:: console
 
      vlan_offload = rte_eth_dev_get_vlan_offload(port);
-     vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+     vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
 Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index d35751d5b5a7..594e98a6b803 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -30,7 +30,7 @@ Speed capabilities
 
 Supports getting the speed capabilities that the current device is capable of.
 
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
 * **[related]  API**: ``rte_eth_dev_info_get()``.
 
 
@@ -101,11 +101,11 @@ Supports Rx interrupts.
 Lock-free Tx queue
 ------------------
 
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
 invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
 
-* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
 * **[related]  API**: ``rte_eth_tx_burst()``.
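
A hedged application-side sketch of acting on this capability (dev_info as
returned by rte_eth_dev_info_get()):

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE) {
		/* threads may share one Tx queue without an app-level lock */
	}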
 
 
@@ -117,8 +117,8 @@ Fast mbuf free
 Supports optimization for fast release of mbufs following successful Tx.
 Requires that, per queue, all mbufs come from the same mempool and have refcnt = 1.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
 
 
 .. _nic_features_free_tx_mbuf_on_demand:
@@ -177,7 +177,7 @@ Scattered Rx
 
 Supports receiving segmented mbufs.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
 * **[implements] datapath**: ``Scattered Rx function``.
 * **[implements] rte_eth_dev_data**: ``scattered_rx``.
 * **[provides]   eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -205,12 +205,12 @@ LRO
 
 Supports Large Receive Offload.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
   ``dev_conf.rxmode.max_lro_pkt_size``.
 * **[implements] datapath**: ``LRO functionality``.
 * **[implements] rte_eth_dev_data**: ``lro``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
 * **[provides]   rte_eth_dev_info**: ``max_lro_pkt_size``.
 
 
@@ -221,12 +221,12 @@ TSO
 
 Supports TCP Segmentation Offloading.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
 * **[uses]       rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
 * **[uses]       mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
 * **[implements] datapath**: ``TSO functionality``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
 
 
 .. _nic_features_promiscuous_mode:
@@ -287,9 +287,9 @@ RSS hash
 
 Supports RSS hashing on RX.
 
-* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
 * **[uses]     user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
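
A minimal configuration sketch matching the knobs listed above:

	struct rte_eth_conf conf = {
		.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS,
		.rxmode.offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH,
		.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
	};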
 
@@ -302,7 +302,7 @@ Inner RSS
 Supports RX RSS hashing on Inner headers.
 
 * **[uses]    rte_flow_action_rss**: ``level``.
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
 
@@ -339,7 +339,7 @@ VMDq
 
 Supports Virtual Machine Device Queues (VMDq).
 
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
 * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -362,7 +362,7 @@ DCB
 
 Supports Data Center Bridging (DCB).
 
-* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
 * **[uses]       user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -378,7 +378,7 @@ VLAN filter
 
 Supports filtering of a VLAN Tag identifier.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
 * **[implements] eth_dev_ops**: ``vlan_filter_set``.
 * **[related]    API**: ``rte_eth_dev_vlan_filter()``.
 
@@ -416,13 +416,13 @@ Supports inline crypto processing defined by rte_security library to perform cry
 operations of the security protocol while the packet is received in the NIC. The NIC
 is not aware of the protocol operations. See the Security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@@ -438,14 +438,14 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
 packet is received at NIC. The NIC is capable of understanding the security
 protocol operations. See security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
   ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@@ -459,7 +459,7 @@ CRC offload
 Supports CRC stripping by hardware.
 A PMD is assumed to support CRC stripping by default. A PMD should advertise whether it supports keeping the CRC.
 
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
 
 
 .. _nic_features_vlan_offload:
@@ -469,13 +469,13 @@ VLAN offload
 
 Supports VLAN offload to hardware.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
 * **[implements] eth_dev_ops**: ``vlan_offload_set``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[related]    API**: ``rte_eth_dev_set_vlan_offload()``,
   ``rte_eth_dev_get_vlan_offload()``.
 
@@ -487,14 +487,14 @@ QinQ offload
 
 Supports QinQ (queue in queue) offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
   ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
   ``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 
 
 .. _nic_features_fec:
@@ -508,7 +508,7 @@ information to correct the bit errors generated during data packet transmission
 improves signal quality but also introduces latency. This function can be enabled or disabled as required.
 
 * **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides]   rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides]   rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
 * **[related]    API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
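
A hedged sketch of how an application might exercise this API (assuming the
usual headers are included, ``port_id`` is a valid port, and error handling
is omitted)::

    struct rte_eth_fec_capa fec_capa[8];
    uint32_t fec_mode;
    int num, i;

    /* retrieve the per-speed FEC capabilities */
    num = rte_eth_fec_get_capability(port_id, fec_capa, RTE_DIM(fec_capa));
    for (i = 0; i < num; i++)
        if (fec_capa[i].capa & RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS))
            printf("RS FEC supported at speed %u\n", fec_capa[i].speed);

    /* read the current mode, then request RS FEC */
    rte_eth_fec_get(port_id, &fec_mode);
    rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS));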
 
 
@@ -519,16 +519,16 @@ L3 checksum offload
 
 Supports L3 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
 * **[uses]     mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
   ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
   ``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 
 
 .. _nic_features_l4_checksum_offload:
@@ -538,8 +538,8 @@ L4 checksum offload
 
 Supports L4 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
   ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -547,8 +547,8 @@ Supports L4 checksum offload.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
   ``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 
 .. _nic_features_hw_timestamp:
 
@@ -557,10 +557,10 @@ Timestamp offload
 
 Supports Timestamp.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[related] eth_dev_ops**: ``read_clock``.
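
A minimal sketch of enabling Rx timestamping and sampling the device clock
(``port_id`` and the queue counts are illustrative; error handling and the
usual headers are omitted)::

    struct rte_eth_conf conf = {0};
    uint64_t dev_clock;

    conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
    rte_eth_dev_configure(port_id, 1, 1, &conf);
    /* ... start the port and receive traffic ... */

    /* sample the same clock the NIC used to stamp the mbufs */
    if (rte_eth_read_clock(port_id, &dev_clock) == 0)
        printf("device clock: %" PRIu64 "\n", dev_clock);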
 
 .. _nic_features_macsec_offload:
@@ -570,11 +570,11 @@ MACsec offload
 
 Supports MACsec.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 
 
 .. _nic_features_inner_l3_checksum:
@@ -584,16 +584,16 @@ Inner L3 checksum
 
 Supports inner packet L3 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 
 
 .. _nic_features_inner_l4_checksum:
@@ -603,15 +603,15 @@ Inner L4 checksum
 
 Supports inner packet L4 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
   ``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 
 
 .. _nic_features_shared_rx_queue:
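
The offload features above all follow the same configuration pattern: the
application reads the advertised capability bits, then requests only what the
device supports. A hedged sketch of that pattern, using VLAN stripping as the
example (``port_id`` and the queue counts are illustrative)::

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf = {0};

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
    rte_eth_dev_configure(port_id, 1, 1, &conf);
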
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index ed6afd62703d..bba53f5a64ee 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
 To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
 will be checked:
 
-*   ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+*   ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
 
-*   ``DEV_RX_OFFLOAD_CHECKSUM``
+*   ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
 
-*   ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+*   ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
 
 *   ``fdir_conf->mode``
 
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 2efdd1a41bb4..a1e236ad75e5 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -216,21 +216,21 @@ For example,
     *   If the max number of VFs (max_vfs) is set in the range of 1 to 32:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
 
         If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
 
     *   If the max number of VFs (max_vfs) is in the range of 33 to 64:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected
         as ``rxq`` is not valid in this case;
 
-        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are 64 pools in total (RTE_ETH_64_POOLS),
         and each VF has 2 Rx queues;
 
-    On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
-    or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+    On the host, to enable VF RSS functionality, the Rx mq mode should be set to RTE_ETH_MQ_RX_VMDQ_RSS
+    or RTE_ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
     The VF RSS information, such as the hash function, RSS key and RSS key length, also needs to be configured (see the sketch below).
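
    A hedged sketch of such a host-side configuration (the hash type and
    queue counts are illustrative only)::

        struct rte_eth_conf port_conf = {0};

        port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
        port_conf.rx_adv_conf.rss_conf.rss_key = NULL; /* default RSS key */
        port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
        rte_eth_dev_configure(port_id, 2, 2, &port_conf); /* 2 Rx queues */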
 
 .. note::
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 20a74b9b5bcd..148d2f5fc2be 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -89,13 +89,13 @@ Other features are supported using optional MACRO configuration. They include:
 
 To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
 
-*   DEV_RX_OFFLOAD_VLAN_STRIP
+*   RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 
-*   DEV_RX_OFFLOAD_VLAN_EXTEND
+*   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
 
-*   DEV_RX_OFFLOAD_CHECKSUM
+*   RTE_ETH_RX_OFFLOAD_CHECKSUM
 
-*   DEV_RX_OFFLOAD_HEADER_SPLIT
+*   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
 
 *   dev_conf
 
@@ -163,13 +163,13 @@ l3fwd
 ~~~~~
 
 When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
 Otherwise, by default, RX vPMD is disabled.
 
 load_balancer
 ~~~~~~~~~~~~~
 
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
 In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
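
A hedged sketch of a port configuration that keeps the vector Rx path
eligible (the VLAN flag is illustrative; what matters here is that
``RTE_ETH_RX_OFFLOAD_CHECKSUM`` is absent)::

    struct rte_eth_conf port_conf = {0};

    /* note: RTE_ETH_RX_OFFLOAD_CHECKSUM is deliberately not set */
    port_conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
    rte_eth_dev_configure(port_id, 1, 1, &port_conf);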
 
 
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index dd059b227d8e..86927a0b56b0 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -371,7 +371,7 @@ Limitations
 
 - CRC:
 
-  - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+  - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
@@ -611,7 +611,7 @@ Driver options
   small-packet traffic.
 
   When MPRQ is enabled, MTU can be larger than the size of
-  user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+  user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. The PMD will
   configure a stride size large enough to accommodate the MTU as long as
   the device allows. Note that this can waste system memory compared to enabling Rx
   scatter and multi-segment packet.
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 3ce696b605d1..681010d9ed7d 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
 An example utility for eBPF instruction generation in the format of C arrays will
 be added in future releases.
 
 TAP reports the supported RSS functions as part of the dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
 **Known limitation:** TAP supports all of the above hash functions together
 and not in partial combinations.
 
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 7bff0aef0b74..9b2c31a2f0bc 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
 
    - the bit mask of required GSO types. The GSO library uses the same macros as
      those that describe a physical device's TX offloading capabilities (i.e.
-     ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+     ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
      wants to segment TCP/IPv4 packets, it should set gso_types to
-     ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
-     supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
-     ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+     ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+     supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+     ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
      allowed.
 
    - a flag, that indicates whether the IPv4 headers of output segments should
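
As a hedged sketch of the gso_types usage described in this hunk, a GSO
context for TCP/IPv4 and VXLAN segmentation might look as follows
(``mbuf_pool`` and ``pkt`` are assumed to exist, and ``pkt`` is assumed to
carry the TSO request flags set by the application)::

    struct rte_gso_ctx gso_ctx = {
        .direct_pool   = mbuf_pool,
        .indirect_pool = mbuf_pool,
        .gso_types     = RTE_ETH_TX_OFFLOAD_TCP_TSO |
                         RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO,
        .gso_size      = 1400, /* max payload per output segment */
        .flag          = RTE_GSO_FLAG_IPID_FIXED,
    };
    struct rte_mbuf *segs[64];
    int nb_segs = rte_gso_segment(pkt, &gso_ctx, segs, RTE_DIM(segs));
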
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 2f190b40e43a..dc6186a44ae2 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
     mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM
     set out_ip checksum to 0 in the packet
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
 
 - calculate checksum of out_ip and out_udp::
 
@@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
     set out_ip checksum to 0 in the packet
     set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
-  and DEV_TX_OFFLOAD_UDP_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+  and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
 
 - calculate checksum of in_ip::
 
@@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
 
   This is similar to case 1), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of in_ip and in_tcp::
@@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
   This is similar to case 2), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
-  DEV_TX_OFFLOAD_TCP_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+  RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - segment inner TCP::
@@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header without including the IP
       payload length using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of out_ip, in_ip, in_tcp::
@@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
-  DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+  RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
 
 The list of flags and their precise meaning is described in the mbuf API
 documentation (rte_mbuf.h). Also refer to the testpmd source code
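
As a hedged sketch, the last case above (out_ip, in_ip, in_tcp) could be
expressed as follows, assuming ``mb`` points to a VXLAN-encapsulated
TCP/IPv4 packet::

    mb->outer_l2_len = sizeof(struct rte_ether_hdr);
    mb->outer_l3_len = sizeof(struct rte_ipv4_hdr);
    mb->l2_len = sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr) +
                 sizeof(struct rte_ether_hdr);
    mb->l3_len = sizeof(struct rte_ipv4_hdr);
    mb->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
                    PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
    /* as described above: zero both IP checksums in the packet and seed
     * the TCP checksum with rte_ipv4_phdr_cksum() */
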
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 0d4ac77a7ccf..68312898448c 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
 
 Avoiding lock contention is a key issue in a multi-core environment.
 To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
 In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
 
 To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
 
 Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
 
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
 concurrently on the same tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
 
 *  Remove the explicit spinlock in some applications where lcores are not mapped to Tx queues in a 1:1 relation.
@@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
 *  In the eventdev use case, avoid dedicating a separate TX core for transmitting and thus
    enable more scaling, as all workers can send the packets.
 
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
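
A hedged sketch of that capability probe (``port_id`` is illustrative)::

    struct rte_eth_dev_info dev_info;
    int txq_shared = 0;

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)
        /* multiple lcores may call rte_eth_tx_burst() on one queue */
        txq_shared = 1;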
 
 Device Identification, Ownership and Configuration
 --------------------------------------------------
@@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
 The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
 Any requested offloading by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
 ``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
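
A hedged sketch of both levels, requesting multi-segment Tx per port and
letting a queue inherit it (queue sizes are illustrative)::

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf = {0};
    struct rte_eth_txconf txconf;

    rte_eth_dev_info_get(port_id, &dev_info);
    conf.txmode.offloads =
        RTE_ETH_TX_OFFLOAD_MULTI_SEGS & dev_info.tx_offload_capa;
    rte_eth_dev_configure(port_id, 1, 1, &conf);

    txconf = dev_info.default_txconf;
    txconf.offloads = conf.txmode.offloads; /* per-queue request */
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
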
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index a2169517c3f9..d798adb83e1d 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1993,23 +1993,23 @@ only matching traffic goes through.
 
 .. table:: RSS
 
-   +---------------+---------------------------------------------+
-   | Field         | Value                                       |
-   +===============+=============================================+
-   | ``func``      | RSS hash function to apply                  |
-   +---------------+---------------------------------------------+
-   | ``level``     | encapsulation level for ``types``           |
-   +---------------+---------------------------------------------+
-   | ``types``     | specific RSS hash types (see ``ETH_RSS_*``) |
-   +---------------+---------------------------------------------+
-   | ``key_len``   | hash key length in bytes                    |
-   +---------------+---------------------------------------------+
-   | ``queue_num`` | number of entries in ``queue``              |
-   +---------------+---------------------------------------------+
-   | ``key``       | hash key                                    |
-   +---------------+---------------------------------------------+
-   | ``queue``     | queue indices to use                        |
-   +---------------+---------------------------------------------+
+   +---------------+-------------------------------------------------+
+   | Field         | Value                                           |
+   +===============+=================================================+
+   | ``func``      | RSS hash function to apply                      |
+   +---------------+-------------------------------------------------+
+   | ``level``     | encapsulation level for ``types``               |
+   +---------------+-------------------------------------------------+
+   | ``types``     | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+   +---------------+-------------------------------------------------+
+   | ``key_len``   | hash key length in bytes                        |
+   +---------------+-------------------------------------------------+
+   | ``queue_num`` | number of entries in ``queue``                  |
+   +---------------+-------------------------------------------------+
+   | ``key``       | hash key                                        |
+   +---------------+-------------------------------------------------+
+   | ``queue``     | queue indices to use                            |
+   +---------------+-------------------------------------------------+
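
A hedged sketch of filling this action to spread matched traffic over two
queues (a NULL key selects the device's default RSS key)::

    uint16_t queues[] = { 0, 1 };
    struct rte_flow_action_rss rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0, /* outermost encapsulation */
        .types = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
        .key_len = 0,
        .queue_num = RTE_DIM(queues),
        .key = NULL,
        .queue = queues,
    };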
 
 Action: ``PF``
 ^^^^^^^^^^^^^^
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index ad92c16868c1..46c9b51d1bf9 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -569,7 +569,7 @@ created by the application is attached to the security session by the API
 
 For Inline Crypto and Inline protocol offload, device specific defined metadata is
 updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
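
A hedged sketch of that metadata update on the Tx path (``sess`` and ``mb``
are assumed to be a previously created security session and the packet to
send)::

    struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

    if (sec_ctx != NULL)
        rte_security_set_pkt_metadata(sec_ctx, sess, mb, NULL);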
 
 For inline protocol offloaded ingress traffic, the application can register a
 pointer, ``userdata``, in the security session. When the packet is received,
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index cc2b89850b07..f11550dc78ac 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -69,22 +69,16 @@ Deprecation Notices
   ``RTE_ETH_FLOW_MAX`` is one sample of the mentioned case, adding a new flow
   type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
   usage in following public struct hierarchy:
-  ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+  ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
   Such usages need to be identified and fixed in 20.11, otherwise this blocks
   us from extending the existing enums/defines.
   One solution can be to use a fixed-size array instead of a ``.*MAX.*`` value.
 
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
-  Macros will be added for backward compatibility.
-  Backward compatibility macros will be removed on v22.11.
-  A few old backward compatibility macros from 2013 that does not have
-  proper prefix will be removed on v21.11.
-
 * ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
   and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
   will be removed in DPDK 20.11.
 
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
   This will allow application to enable or disable PMDs from updating
   ``rte_mbuf::hash::fdir``.
   This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 569d3c00b9ee..b327c2bfca1c 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -446,6 +446,9 @@ ABI Changes
 * bbdev: Added capability related to more comprehensive CRC options,
   shifting values of the ``enum rte_bbdev_op_ldpcdec_flag_bitmasks``.
 
+* ethdev: All enums & macros updated to have ``RTE_ETH`` prefix and structures
+  updated to have ``rte_eth`` prefix. DPDK components updated to use new names.
+
 
 Known Issues
 ------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 78171b25f96e..782574dd39d5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -209,12 +209,12 @@ Where:
     device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
 
 *   ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
     allows the user to disable some of the RX HW offload capabilities.
     By default, all HW RX offloads are enabled.
 
 *   ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
     allows the user to disable some of the TX HW offload capabilities.
     By default, all HW TX offloads are enabled.
 
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index d23e0b6a7a2e..30edef07ea20 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -546,7 +546,7 @@ The command line options are:
     Set the hexadecimal bitmask of the RX multi queue modes which can be enabled.
     The default value is 0x7::
 
-       ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+       RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
 
 *   ``--record-core-cycles``
 
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
index be52e6f72dab..a922988607ef 100644
--- a/drivers/bus/dpaa/include/process.h
+++ b/drivers/bus/dpaa/include/process.h
@@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
 struct usdpaa_ioctl_link_status_args_old {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
-	/* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+	/* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
 	int     link_autoneg;
 
 };
@@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
 struct usdpaa_ioctl_update_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_update_link_speed {
 	/* network device node name*/
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
 };
 
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index ef85073b17e1..e13d55713625 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -167,7 +167,7 @@ enum roc_npc_rss_hash_function {
 struct roc_npc_action_rss {
 	enum roc_npc_rss_hash_function func;
 	uint32_t level;
-	uint64_t types;	       /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types;	       /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len;      /**< Hash key length in bytes. */
 	uint32_t queue_num;    /**< Number of entries in @p queue. */
 	const uint8_t *key;    /**< Hash key. */
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index a077376dc0fb..8f778f0c2419 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -93,10 +93,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@@ -290,7 +290,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -320,7 +320,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		internals->tx_queue[i].sockfd = -1;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -331,7 +331,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
 	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	return 0;
 }
 
@@ -346,9 +346,9 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
 	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index b362ccdcd38c..e156246f24df 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -163,10 +163,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -652,7 +652,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -661,7 +661,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 377299b14c7a..b618cba3f023 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
 		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
 
 	/* ARK PMD supports all line rates, how do we indicate that here ?? */
-	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
-				ETH_LINK_SPEED_10G |
-				ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G);
-
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+	dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+				RTE_ETH_LINK_SPEED_10G |
+				RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G);
+
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return 0;
 }
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 5a198f53fce7..f7bfac796c07 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -154,20 +154,20 @@ static struct rte_pci_driver rte_atl_pmd = {
 	.remove = eth_atl_pci_remove,
 };
 
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
-			| DEV_RX_OFFLOAD_IPV4_CKSUM \
-			| DEV_RX_OFFLOAD_UDP_CKSUM \
-			| DEV_RX_OFFLOAD_TCP_CKSUM \
-			| DEV_RX_OFFLOAD_MACSEC_STRIP \
-			| DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
-			| DEV_TX_OFFLOAD_IPV4_CKSUM \
-			| DEV_TX_OFFLOAD_UDP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_TSO \
-			| DEV_TX_OFFLOAD_MACSEC_INSERT \
-			| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
+			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define SFP_EEPROM_SIZE 0x100
 
@@ -488,7 +488,7 @@ atl_dev_start(struct rte_eth_dev *dev)
 	/* set adapter started */
 	hw->adapter_stopped = 0;
 
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -655,18 +655,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
 	uint32_t speed_mask = 0;
 
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
 	} else {
-		if (link_speeds & ETH_LINK_SPEED_10G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed_mask |= AQ_NIC_RATE_10G;
-		if (link_speeds & ETH_LINK_SPEED_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed_mask |= AQ_NIC_RATE_5G;
-		if (link_speeds & ETH_LINK_SPEED_1G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed_mask |= AQ_NIC_RATE_1G;
-		if (link_speeds & ETH_LINK_SPEED_2_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed_mask |=  AQ_NIC_RATE_2G5;
-		if (link_speeds & ETH_LINK_SPEED_100M)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed_mask |= AQ_NIC_RATE_100M;
 	}
 
@@ -1127,10 +1127,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
-	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 
 	return 0;
 }
@@ -1175,10 +1175,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 	u32 fc = AQ_NIC_FC_OFF;
 	int err = 0;
 
-	link.link_status = ETH_LINK_DOWN;
+	link.link_status = RTE_ETH_LINK_DOWN;
 	link.link_speed = 0;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 	memset(&old, 0, sizeof(old));
 
 	/* load old link status */
@@ -1198,8 +1198,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 		return 0;
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = hw->aq_link_status.mbps;
 
 	rte_eth_linkstatus_set(dev, &link);
@@ -1333,7 +1333,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1532,13 +1532,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	hw->aq_fw_ops->get_flow_control(hw, &fc);
 
 	if (fc == AQ_NIC_FC_OFF)
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (fc & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (fc & AQ_NIC_FC_TX)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 
 	return 0;
 }
@@ -1553,13 +1553,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (hw->aq_fw_ops->set_flow_control == NULL)
 		return -ENOTSUP;
 
-	if (fc_conf->mode == RTE_FC_NONE)
+	if (fc_conf->mode == RTE_ETH_FC_NONE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
-	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
-	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
-	else if (fc_conf->mode == RTE_FC_FULL)
+	else if (fc_conf->mode == RTE_ETH_FC_FULL)
 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
 
 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1727,14 +1727,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
 
-	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
 
-	if (mask & ETH_VLAN_EXTEND_MASK)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
 		ret = -ENOTSUP;
 
 	return ret;
@@ -1750,10 +1750,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 	PMD_INIT_FUNC_TRACE();
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
 		break;
 	default:
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
index fbc9917ed30d..ed9ef9f0cc52 100644
--- a/drivers/net/atlantic/atl_ethdev.h
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -11,15 +11,15 @@
 #include "hw_atl/hw_atl_utils.h"
 
 #define ATL_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define ATL_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct atl_adapter *)adapter)->hw)
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 0d3460383a50..2ff426892df2 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
 	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_IPV4_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
 	/* allocate memory for the software ring */
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 932ec90265cf..5d94db02c506 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1998,9 +1998,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	/* Setup required number of queues */
 	_avp_set_queue_counts(eth_dev);
 
-	mask = (ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+	mask = (RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 	ret = avp_vlan_offload_set(eth_dev, mask);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2140,8 +2140,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
-	link->link_speed = ETH_SPEED_NUM_10G;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_speed = RTE_ETH_SPEED_NUM_10G;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_status = !!(avp->flags & AVP_F_LINKUP);
 
 	return -1;
@@ -2191,8 +2191,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
 	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
 	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	}
 
 	return 0;
@@ -2205,9 +2205,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	uint64_t offloads = dev_conf->rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2216,13 +2216,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
 
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index ca32ad641873..3aaa2193272f 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
 	pdata->rss_hf = rss_conf->rss_hf;
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 }
 
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 0250256830ac..dab0c6775d1d 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
 	struct axgbe_port *pdata =  dev->data->dev_private;
 	/* Checksum offload to hardware */
 	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_CHECKSUM;
+				RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return 0;
 }
 
@@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		pdata->rss_enable = 1;
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		pdata->rss_enable = 0;
 	else
 		return  -1;
@@ -385,7 +385,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
 
 	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 				max_pkt_len > pdata->rx_buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -521,8 +521,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		pdata->rss_table[i] = reta_conf[idx].reta[shift];
@@ -552,8 +552,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		reta_conf[idx].reta[shift] = pdata->rss_table[i];
@@ -590,13 +590,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
 
-	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Set the RSS options */
@@ -765,7 +765,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
 	link.link_status = pdata->phy_link;
 	link.link_speed = pdata->phy_speed;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
 		PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1208,24 +1208,24 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
 	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
 	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
-	dev_info->speed_capa =  ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_SCATTER	  |
-		DEV_RX_OFFLOAD_KEEP_CRC;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_SCATTER	  |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
 		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1262,13 +1262,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	fc.autoneg = pdata->pause_autoneg;
 
 	if (pdata->rx_pause && pdata->tx_pause)
-		fc.mode = RTE_FC_FULL;
+		fc.mode = RTE_ETH_FC_FULL;
 	else if (pdata->rx_pause)
-		fc.mode = RTE_FC_RX_PAUSE;
+		fc.mode = RTE_ETH_FC_RX_PAUSE;
 	else if (pdata->tx_pause)
-		fc.mode = RTE_FC_TX_PAUSE;
+		fc.mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc.mode = RTE_FC_NONE;
+		fc.mode = RTE_ETH_FC_NONE;
 
 	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
 	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1298,13 +1298,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	AXGMAC_IOWRITE(pdata, reg, reg_val);
 	fc.mode = fc_conf->mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 	} else {
@@ -1386,15 +1386,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	fc.mode = pfc_conf->fc.mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1830,8 +1830,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+	case RTE_ETH_VLAN_TYPE_INNER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
 		if (qinq) {
 			if (tpid != 0x8100 && tpid != 0x88a8)
 				PMD_DRV_LOG(ERR,
@@ -1848,8 +1848,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    "Inner type not supported in single tag\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
 		if (qinq) {
 			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
 			/*Enable outer VLAN tag*/
@@ -1866,11 +1866,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 					    "tag supported 0x8100/0x88A8\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_MAX:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+	case RTE_ETH_VLAN_TYPE_MAX:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
 		break;
-	case ETH_VLAN_TYPE_UNKNOWN:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+	case RTE_ETH_VLAN_TYPE_UNKNOWN:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
 		break;
 	}
 	return 0;
@@ -1904,8 +1904,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_stripping(pdata);
@@ -1915,8 +1915,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_stripping(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_filtering(pdata);
@@ -1926,14 +1926,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_filtering(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
 			axgbe_vlan_extend_enable(pdata);
 			/* Set global registers with default ethertype*/
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					    RTE_ETHER_TYPE_VLAN);
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					    RTE_ETHER_TYPE_VLAN);
 		} else {
 			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index a6226729fe4d..0a3e1c59df1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -97,12 +97,12 @@
 
 /* Receive Side Scaling */
 #define AXGBE_RSS_OFFLOAD  ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define AXGBE_RSS_HASH_KEY_SIZE		40
 #define AXGBE_RSS_MAX_TABLE_SIZE	256
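
[Illustrative aside, not part of the diff: with the renamed flags an
application still validates requested hash types against the PMD
capability; a minimal sketch, assuming "port_id" is an initialized
port:]

	struct rte_eth_dev_info dev_info;
	uint64_t rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP;

	if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
		/* keep only the hash types this PMD can compute */
		rss_hf &= dev_info.flow_type_rss_offloads;
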
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
index 4f98e695ae74..59fa9175aded 100644
--- a/drivers/net/axgbe/axgbe_mdio.c
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
 		pdata->an_int = 0;
 		axgbe_an73_clear_interrupts(pdata);
 		pdata->eth_dev->data->dev_link.link_status =
-			ETH_LINK_DOWN;
+			RTE_ETH_LINK_DOWN;
 	} else if (pdata->an_state == AXGBE_AN_ERROR) {
 		PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
 			    cur_state);
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index c8618d2d6daa..aa2c27ebaa49 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		(DMA_CH_INC * rxq->queue_id));
 	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
 						  DMA_CH_RDTR_LO);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
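
[Illustrative aside, not part of the diff: only the offload flag is
renamed; the per-packet result is unchanged, so an application reads
the stripped tag as before. Sketch, with "handle_vlan" a hypothetical
callback and "mbuf" received via rte_eth_rx_burst():]

	if (mbuf->ol_flags & PKT_RX_VLAN_STRIPPED)
		/* HW removed the tag; the TCI stays in the mbuf */
		handle_vlan(mbuf->vlan_tci);
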
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 567ea2382864..78fc717ec44a 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
 	link.link_speed = sc->link_vars.line_speed;
 	switch (sc->link_vars.duplex) {
 		case DUPLEX_FULL:
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			break;
 		case DUPLEX_HALF:
-			link.link_duplex = ETH_LINK_HALF_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 			break;
 	}
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	link.link_status = sc->link_vars.link_up;
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -408,7 +408,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
 		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
 				"VF device is no longer operational");
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	}
 
 	return ret;
@@ -534,7 +534,7 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
 
 	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
 	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -669,7 +669,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 	bnx2x_load_firmware(sc);
 	assert(sc->firmware);
 
-	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		sc->udp_rss = 1;
 
 	sc->rx_budget = BNX2X_RX_BUDGET;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 6743cf92b0e6..39bd739c7bc9 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -569,37 +569,37 @@ struct bnxt_rep_info {
 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
 
 #define BNXT_ETH_RSS_SUPPORT (	\
-	ETH_RSS_IPV4 |		\
-	ETH_RSS_NONFRAG_IPV4_TCP |	\
-	ETH_RSS_NONFRAG_IPV4_UDP |	\
-	ETH_RSS_IPV6 |		\
-	ETH_RSS_NONFRAG_IPV6_TCP |	\
-	ETH_RSS_NONFRAG_IPV6_UDP |	\
-	ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_CKSUM | \
-				     DEV_TX_OFFLOAD_UDP_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_TSO | \
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_QINQ_INSERT | \
-				     DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
-				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_TCP_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_KEEP_CRC | \
-				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-				     DEV_RX_OFFLOAD_TCP_LRO | \
-				     DEV_RX_OFFLOAD_SCATTER | \
-				     DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RSS_IPV4 |		\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
+	RTE_ETH_RSS_IPV6 |		\
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
+	RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+				     RTE_ETH_RX_OFFLOAD_SCATTER | \
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index f385723a9f65..2791a5c62db1 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 		goto err_out;
 
 	/* Alloc RSS context only if RSS mode is enabled */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
 
 		/* RSS table size in Thor is 512.
@@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	 * setting is not available at this time, it will not be
 	 * configured correctly in the CFA.
 	 */
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 
 	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
-				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
 				    true : false);
 	if (rc)
 		goto err_out;
@@ -923,35 +923,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
 		link_speed = bp->link_info->support_pam4_speeds;
 
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
-		speed_capa |= ETH_LINK_SPEED_2_5G;
+		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
-		speed_capa |= ETH_LINK_SPEED_20G;
+		speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	if (bp->link_info->auto_mode ==
 	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -995,14 +995,14 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
 				    dev_info->tx_queue_offload_capa;
 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
@@ -1049,8 +1049,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	 */
 
 	/* VMDq resources */
-	vpool = 64; /* ETH_64_POOLS */
-	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	vpool = 64; /* RTE_ETH_64_POOLS */
+	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
 	for (i = 0; i < 4; vpool >>= 1, i++) {
 		if (max_vnics > vpool) {
 			for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1145,15 +1145,15 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
 		goto resource_error;
 
-	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
 		goto resource_error;
 
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
 	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
@@ -1182,7 +1182,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
 			eth_dev->data->port_id,
 			(uint32_t)link->link_speed,
-			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			("full-duplex") : ("half-duplex\n"));
 	else
 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1199,10 +1199,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return 1;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		return 1;
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1247,15 +1247,15 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 	 * a limited subset have been enabled.
 	 */
 	if (eth_dev->data->dev_conf.rxmode.offloads &
-		~(DEV_RX_OFFLOAD_VLAN_STRIP |
-		  DEV_RX_OFFLOAD_KEEP_CRC |
-		  DEV_RX_OFFLOAD_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_TCP_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_RSS_HASH |
-		  DEV_RX_OFFLOAD_VLAN_FILTER))
+		~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		  RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
 		goto use_scalar_rx;
 
 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1307,7 +1307,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 	 * or tx offloads.
 	 */
 	if (eth_dev->data->scattered_rx ||
-	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+	    (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
 	    BNXT_TRUFLOW_EN(bp))
 		goto use_scalar_tx;
 
@@ -1608,10 +1608,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
 	bnxt_link_update_op(eth_dev, 1);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		vlan_mask |= ETH_VLAN_FILTER_MASK;
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		vlan_mask |= ETH_VLAN_STRIP_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
 	if (rc)
 		goto error;
@@ -1833,8 +1833,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		/* Retrieve link info from hardware */
 		rc = bnxt_get_hwrm_link_config(bp, &new);
 		if (rc) {
-			new.link_speed = ETH_LINK_SPEED_100M;
-			new.link_duplex = ETH_LINK_FULL_DUPLEX;
+			new.link_speed = RTE_ETH_LINK_SPEED_100M;
+			new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR,
 				"Failed to retrieve link rc = 0x%x!\n", rc);
 			goto out;
@@ -2028,7 +2028,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	if (!vnic->rss_table)
 		return -EINVAL;
 
-	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return -EINVAL;
 
 	if (reta_size != tbl_size) {
@@ -2041,8 +2041,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	for (i = 0; i < reta_size; i++) {
 		struct bnxt_rx_queue *rxq;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (!(reta_conf[idx].mask & (1ULL << sft)))
 			continue;
@@ -2095,8 +2095,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 	}
 
 	for (idx = 0, i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
@@ -2134,7 +2134,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	 * If RSS enablement were different than dev_configure,
 	 * then return -EINVAL
 	 */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
@@ -2152,7 +2152,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
 	vnic->hash_mode =
 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
-					    ETH_RSS_LEVEL(rss_conf->rss_hf));
+					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
 
 	/*
 	 * If hashkey is not specified, use the previously configured
@@ -2197,30 +2197,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 		hash_types = vnic->hash_type;
 		rss_conf->rss_hf = 0;
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_IPV4;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_IPV6;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 		}
@@ -2260,17 +2260,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 		fc_conf->autoneg = 1;
 	switch (bp->link_info->pause) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	}
 	return 0;
@@ -2293,11 +2293,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		bp->link_info->auto_pause = 0;
 		bp->link_info->force_pause = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@@ -2308,7 +2308,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
 		}
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@@ -2319,7 +2319,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
 		}
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@@ -2350,7 +2350,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2364,7 +2364,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		tunnel_type =
 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2413,7 +2413,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (!bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2430,7 +2430,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
 		port = bp->vxlan_fw_dst_port_id;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (!bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2608,7 +2608,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 	int rc;
 
 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
-	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		/* Remove any VLAN filters programmed */
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
@@ -2628,7 +2628,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 		bnxt_add_vlan_filter(bp, 0);
 	}
 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
 
 	return 0;
 }
@@ -2641,7 +2641,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 
 	/* Destroy vnic filters and vnic */
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
 	}
@@ -2680,7 +2680,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		rc = bnxt_add_vlan_filter(bp, 0);
 		if (rc)
 			return rc;
@@ -2698,7 +2698,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
 
 	return rc;
 }
@@ -2718,22 +2718,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 	if (!dev->data->dev_started)
 		return 0;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering */
 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
 		else
 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@@ -2748,10 +2748,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 {
 	struct bnxt *bp = dev->data->dev_private;
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
-	if (vlan_type != ETH_VLAN_TYPE_INNER &&
-	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	    vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -2763,7 +2763,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		return -EINVAL;
 	}
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		switch (tpid) {
 		case RTE_ETHER_TYPE_QINQ:
 			bp->outer_tpid_bd =
@@ -2791,7 +2791,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		}
 		bp->outer_tpid_bd |= tpid;
 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer vlan in QinQ\n");
 		return -EINVAL;
@@ -2831,7 +2831,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
 	bnxt_del_dflt_mac_filter(bp, vnic);
 
 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		/* This filter will allow only untagged packets */
 		rc = bnxt_add_vlan_filter(bp, 0);
 	} else {
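
[Illustrative aside, not part of the diff: the composition rule for a
forced speed is unchanged, RTE_ETH_LINK_SPEED_FIXED or'd with exactly
one speed bit:]

	struct rte_eth_conf conf = { 0 };

	/* force 25G, no autonegotiation */
	conf.link_speeds = RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_FIXED;
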
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index b2ebb5634e3a..ced697a73980 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -978,7 +978,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -1177,7 +1177,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
 	}
 
 	/* If RSS types is 0, use a best effort configuration */
-	types = rss->types ? rss->types : ETH_RSS_IPV4;
+	types = rss->types ? rss->types : RTE_ETH_RSS_IPV4;
 
 	hash_type = bnxt_rte_to_hwrm_hash_types(types);
 
@@ -1322,7 +1322,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 
 		rxq = bp->rx_queues[act_q->index];
 
-		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+		if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
 			goto use_vnic;
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 181e607d7bf8..82e89b7c8af7 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	uint16_t j = dst_id - 1;
 
 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
-	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+	if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
 	    conf->pool_map[j].pools & (1UL << j)) {
 		PMD_DRV_LOG(DEBUG,
 			"Add vlan %u to vmdq pool %u\n",
@@ -2979,12 +2979,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 {
 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
-	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+	if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
 	switch (conf_link_speed) {
-	case ETH_LINK_SPEED_10M_HD:
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
 	}
@@ -3001,51 +3001,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 {
 	uint16_t eth_link_speed = 0;
 
-	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
-		return ETH_LINK_SPEED_AUTONEG;
+	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+		return RTE_ETH_LINK_SPEED_AUTONEG;
 
-	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_100M:
-	case ETH_LINK_SPEED_100M_HD:
+	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
 		break;
-	case ETH_LINK_SPEED_2_5G:
+	case RTE_ETH_LINK_SPEED_2_5G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
 		break;
-	case ETH_LINK_SPEED_20G:
+	case RTE_ETH_LINK_SPEED_20G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 		break;
@@ -3058,11 +3058,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 	return eth_link_speed;
 }
 
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
-		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
-		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
-		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+		RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+		RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+		RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+		RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
 
 static int bnxt_validate_link_speed(struct bnxt *bp)
 {
@@ -3071,13 +3071,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
 	uint32_t link_speed_capa;
 	uint32_t one_speed;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG)
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
 		return 0;
 
 	link_speed_capa = bnxt_get_speed_capabilities(bp);
 
-	if (link_speed & ETH_LINK_SPEED_FIXED) {
-		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+	if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+		one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
 
 		if (one_speed & (one_speed - 1)) {
 			PMD_DRV_LOG(ERR,
@@ -3107,71 +3107,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 {
 	uint16_t ret = 0;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
 		if (bp->link_info->support_speeds)
 			return bp->link_info->support_speeds;
 		link_speed = BNXT_SUPPORTED_SPEEDS;
 	}
 
-	if (link_speed & ETH_LINK_SPEED_100M)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_100M_HD)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_1G)
+	if (link_speed & RTE_ETH_LINK_SPEED_1G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
-	if (link_speed & ETH_LINK_SPEED_2_5G)
+	if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
-	if (link_speed & ETH_LINK_SPEED_10G)
+	if (link_speed & RTE_ETH_LINK_SPEED_10G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
-	if (link_speed & ETH_LINK_SPEED_20G)
+	if (link_speed & RTE_ETH_LINK_SPEED_20G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
-	if (link_speed & ETH_LINK_SPEED_25G)
+	if (link_speed & RTE_ETH_LINK_SPEED_25G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
-	if (link_speed & ETH_LINK_SPEED_40G)
+	if (link_speed & RTE_ETH_LINK_SPEED_40G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
-	if (link_speed & ETH_LINK_SPEED_50G)
+	if (link_speed & RTE_ETH_LINK_SPEED_50G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
-	if (link_speed & ETH_LINK_SPEED_100G)
+	if (link_speed & RTE_ETH_LINK_SPEED_100G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
-	if (link_speed & ETH_LINK_SPEED_200G)
+	if (link_speed & RTE_ETH_LINK_SPEED_200G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 	return ret;
 }
 
 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 {
-	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+	uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	switch (hw_link_speed) {
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
-		eth_link_speed = ETH_SPEED_NUM_100M;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
-		eth_link_speed = ETH_SPEED_NUM_1G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
-		eth_link_speed = ETH_SPEED_NUM_2_5G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
-		eth_link_speed = ETH_SPEED_NUM_10G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
-		eth_link_speed = ETH_SPEED_NUM_20G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
-		eth_link_speed = ETH_SPEED_NUM_25G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
-		eth_link_speed = ETH_SPEED_NUM_40G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
-		eth_link_speed = ETH_SPEED_NUM_50G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
-		eth_link_speed = ETH_SPEED_NUM_100G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
-		eth_link_speed = ETH_SPEED_NUM_200G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
 	default:
@@ -3184,16 +3184,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 
 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 {
-	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+	uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (hw_link_duplex) {
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
 		/* FALLTHROUGH */
-		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
-		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@@ -3222,12 +3222,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 		link->link_speed =
 			bnxt_parse_hw_link_speed(link_info->link_speed);
 	else
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
 	link->link_status = link_info->link_up;
 	link->link_autoneg = link_info->auto_mode ==
 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
-		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+		RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
 exit:
 	return rc;
 }
@@ -3253,7 +3253,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	if (BNXT_CHIP_P5(bp) &&
-	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+	    dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
 		/* 40G is not supported as part of media auto detect.
 		 * The speed should be forced and autoneg disabled
 		 * to configure 40G speed.
@@ -3344,7 +3344,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
 	HWRM_CHECK_RESULT();
 
-	bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+	bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
 
 	svif_info = rte_le_to_cpu_16(resp->svif_info);
 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
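
[Illustrative aside, not part of the diff: the power-of-two test used
in bnxt_validate_link_speed() above is the usual way to confirm that a
fixed-speed request names exactly one speed; sketch:]

	uint32_t one_speed = link_speeds & ~RTE_ETH_LINK_SPEED_FIXED;

	if ((link_speeds & RTE_ETH_LINK_SPEED_FIXED) &&
	    (one_speed & (one_speed - 1)) != 0)
		return -EINVAL;	/* more than one speed bit set */
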
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index b7e88e013a84..1c07db3ca9c5 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -537,7 +537,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 08cefa1baaef..7940d489a102 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -187,7 +187,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 			rx_ring_info->rx_ring_struct->ring_size *
 			AGG_RING_SIZE_FACTOR)) : 0;
 
-		if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+		if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 			int tpa_max = BNXT_TPA_MAX_AGGS(bp);
 
 			tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@@ -283,7 +283,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 					    ag_bitmap_start, ag_bitmap_len);
 
 			/* TPA info */
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 				rx_ring_info->tpa_info =
 					((struct bnxt_tpa_info *)
 					 ((char *)mz->addr + tpa_info_start));
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 38ec4aa14b77..1456f8b54ffa 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -52,13 +52,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 	bp->nr_vnics = 0;
 
 	/* Multi-queue mode */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
 
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* FALLTHROUGH */
-			/* ETH_8/64_POOLs */
+			/* RTE_ETH_8/64_POOLs */
 			pools = conf->nb_queue_pools;
@@ -66,14 +66,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 			max_pools = RTE_MIN(bp->max_vnics,
 					    RTE_MIN(bp->max_l2_ctx,
 					    RTE_MIN(bp->max_rsscos_ctx,
-						    ETH_64_POOLS)));
+						    RTE_ETH_64_POOLS)));
 			PMD_DRV_LOG(DEBUG,
 				    "pools = %u max_pools = %u\n",
 				    pools, max_pools);
 			if (pools > max_pools)
 				pools = max_pools;
 			break;
-		case ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_RSS:
 			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
 			break;
 		default:
@@ -111,7 +111,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 				    ring_idx, rxq, i, vnic);
 		}
 		if (i == 0) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
 				bp->eth_dev->data->promiscuous = 1;
 				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 			}
@@ -121,8 +121,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 		vnic->end_grp_id = end_grp_id;
 
 		if (i) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
-			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
 				vnic->rss_dflt_cr = true;
 			goto skip_filter_allocation;
 		}
@@ -147,14 +147,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
 	bp->rx_num_qs_per_vnic = nb_q_per_grp;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
 
 		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
 			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
 
 		for (i = 0; i < bp->nr_vnics; i++) {
-			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
 
 			vnic = &bp->vnic_info[i];
 			vnic->hash_type =
@@ -363,7 +363,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
 	rxq->port_id = eth_dev->data->port_id;
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -478,7 +478,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
 
 		if (BNXT_HAS_RING_GRPS(bp)) {
@@ -549,7 +549,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq->rx_started = false;
 	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (BNXT_HAS_RING_GRPS(bp))
 			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index aeacc60a0127..eb555c4545e6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
 	offloads = dev_conf->rxmode.offloads;
 
-	outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
 
 	/* Initialize ol_flags table. */
 	pt = rxr->ol_flags_table;
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
index d08854ff61e2..e4905b4fd169 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
@@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 9b9489a695a2..0627fd212d0a 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static inline void
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 13211060cf0e..f15e2d3b4ed4 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index 6e563053260a..ffd560166cac 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 9e45ddd7a82e..f2fcaf53021c 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -353,7 +353,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@@ -479,7 +479,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 26253a7e17f2..c63cf4b943fa 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 {
 	uint16_t hwrm_type = 0;
 
-	if (rte_type & ETH_RSS_IPV4)
+	if (rte_type & RTE_ETH_RSS_IPV4)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-	if (rte_type & ETH_RSS_IPV6)
+	if (rte_type & RTE_ETH_RSS_IPV6)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 
 	return hwrm_type;
@@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
 {
 	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
-	bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
-	bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP));
+	bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+	bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP));
 	bool l3_only = l3 && !l4;
 	bool l3_and_l4 = l3 && l4;
 
@@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
 	 * return default hash mode.
 	 */
 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
-		return ETH_RSS_LEVEL_PMD_DEFAULT;
+		return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
 	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
 		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_INNERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
 	else
-		rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+		rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	return rss_level;
 }
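
[Illustrative aside, not part of the diff: the RSS level helpers keep
their semantics; RTE_ETH_RSS_LEVEL() extracts 0, 1 or 2 (PMD default,
outermost, innermost) from the rss_hf bits set above:]

	uint64_t rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_LEVEL_INNERMOST;
	uint32_t lvl = RTE_ETH_RSS_LEVEL(rss_hf);	/* yields 2, innermost */
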
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index f71543810970..77ecbef04c3d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 	if (vf >= bp->pdev->max_vfs)
 		return -EINVAL;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
 		PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
 		return -ENOTSUP;
 	}
 
 	/* Is this really the correct mapping?  VFd seems to think it is. */
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		flag |= BNXT_VNIC_INFO_PROMISC;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		flag |= BNXT_VNIC_INFO_BCAST;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
 
 	if (on)
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index fc179a2732ac..8b104b639184 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -167,8 +167,8 @@ struct bond_dev_private {
 	struct rte_eth_desc_lim tx_desc_lim;	/**< Tx descriptor limits */
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[52];				/**< 52-byte hash key buffer. */
 	uint8_t rss_key_len;				/**< hash key length in bytes. */
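
[Illustrative aside, not part of the diff: a flat RETA index still
splits into a 64-entry group plus a bit position, now via
RTE_ETH_RETA_GROUP_SIZE; sketch for filling rte_eth_rss_reta_entry64,
with "i" and "queue_id" assumed from the caller:]

	uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;	/* which 64-entry group */
	uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;	/* bit within the group */

	if (reta_conf[idx].mask & (1ULL << shift))
		reta_conf[idx].reta[shift] = queue_id;
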
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2029955c1092..ca50583d62d8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
 	uint16_t key_speed;
 
 	switch (speed) {
-	case ETH_SPEED_NUM_NONE:
+	case RTE_ETH_SPEED_NUM_NONE:
 		key_speed = 0x00;
 		break;
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		key_speed = BOND_LINK_SPEED_KEY_10M;
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		key_speed = BOND_LINK_SPEED_KEY_100M;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		key_speed = BOND_LINK_SPEED_KEY_1000M;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		key_speed = BOND_LINK_SPEED_KEY_10G;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		key_speed = BOND_LINK_SPEED_KEY_20G;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		key_speed = BOND_LINK_SPEED_KEY_40G;
 		break;
 	default:
@@ -887,7 +887,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
 		if (ret >= 0 && link_info.link_status != 0) {
 			key = link_speed_key(link_info.link_speed) << 1;
-			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+			if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
 				key |= BOND_LINK_FULL_DUPLEX_KEY;
 		} else {
 			key = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 5140ef14c2ee..84943cffe2bb 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
 
 	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
 	if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
 		return 0;
 
 	internals = bonded_eth_dev->data->dev_private;
@@ -592,7 +592,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 			return -1;
 		}
 
-		 if (link_props.link_status == ETH_LINK_UP) {
+		if (link_props.link_status == RTE_ETH_LINK_UP) {
 			if (internals->active_slave_count == 0 &&
 			    !internals->user_defined_primary_port)
 				bond_ethdev_primary_set(internals,
@@ -727,7 +727,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
 		internals->tx_offload_capa = 0;
 		internals->rx_queue_offload_capa = 0;
 		internals->tx_queue_offload_capa = 0;
-		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 		internals->reta_size = 0;
 		internals->candidate_max_rx_pktlen = 0;
 		internals->max_rx_pktlen = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 8d038ba6b6c4..834a5937b3aa 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1369,8 +1369,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
 		 * In any other mode the link properties are set to default
 		 * values of AUTONEG/DUPLEX
 		 */
-		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
-		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	}
 }
 
@@ -1700,7 +1700,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	/* If RSS is enabled for bonding, try to enable it for slaves  */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		/* rss_key won't be empty if RSS is configured in bonded dev */
 		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
 					internals->rss_key_len;
@@ -1714,12 +1714,12 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	slave_eth_dev->data->dev_conf.rxmode.mtu =
 			bonded_eth_dev->data->dev_conf.rxmode.mtu;
@@ -1823,7 +1823,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* If RSS is enabled for bonding, synchronize RETA */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int i;
 		struct bond_dev_private *internals;
 
@@ -1946,7 +1946,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 1;
 
 	internals = eth_dev->data->dev_private;
@@ -2086,7 +2086,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 
 	internals->link_status_polling_enabled = 0;
@@ -2416,15 +2416,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 
 	bond_ctx = ethdev->data->dev_private;
 
-	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	if (ethdev->data->dev_started == 0 ||
 			bond_ctx->active_slave_count == 0) {
-		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 
-	ethdev->data->dev_link.link_status = ETH_LINK_UP;
+	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	if (wait_to_complete)
 		link_update = rte_eth_link_get;
@@ -2449,7 +2449,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 					  &slave_link);
 			if (ret < 0) {
 				ethdev->data->dev_link.link_speed =
-					ETH_SPEED_NUM_NONE;
+					RTE_ETH_SPEED_NUM_NONE;
 				RTE_BOND_LOG(ERR,
 					"Slave (port %u) link get failed: %s",
 					bond_ctx->active_slaves[idx],
@@ -2491,7 +2491,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 		 * In theses mode the maximum theoretical link speed is the sum
 		 * of all the slaves
 		 */
-		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		one_link_update_succeeded = false;
 
 		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2865,7 +2865,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 			goto link_update;
 
 		/* check link state properties if bonded link is up*/
-		if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+		if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 			if (link_properties_valid(bonded_eth_dev, &link) != 0)
 				RTE_BOND_LOG(ERR, "Invalid link properties "
 					     "for slave %d in bonding mode %d",
@@ -2881,7 +2881,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
 			bonded_eth_dev->data->dev_link.link_status =
-								ETH_LINK_UP;
+								RTE_ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 			lsc_flag = 1;
 
@@ -2973,12 +2973,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < reta_count; i++) {
 		internals->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -3011,8 +3011,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
 
@@ -3274,7 +3274,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	internals->max_rx_pktlen = 0;
 
 	/* Initially allow to choose any offload type */
-	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 
 	memset(&internals->default_rxconf, 0,
 	       sizeof(internals->default_rxconf));
@@ -3501,7 +3501,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	 * set key to the the value specified in port RSS configuration.
 	 * Fall back to default RSS key if the key is not specified
 	 */
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		struct rte_eth_rss_conf *rss_conf =
 			&dev->data->dev_conf.rx_adv_conf.rss_conf;
 		if (rss_conf->rss_key != NULL) {
@@ -3526,9 +3526,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
-			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 				internals->reta_conf[i].reta[j] =
-						(i * RTE_RETA_GROUP_SIZE + j) %
+						(i * RTE_ETH_RETA_GROUP_SIZE + j) %
 						dev->data->nb_rx_queues;
 		}
 	}
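
Note for reviewers: the RETA hunks above are a pure rename
(RTE_RETA_GROUP_SIZE -> RTE_ETH_RETA_GROUP_SIZE). For reference, a minimal
application-side sketch of the same fill pattern with the new name;
example_reta_setup(), port_id and nb_rx_queues are illustrative assumptions,
not code in this patch:

#include <string.h>
#include <rte_ethdev.h>

static int
example_reta_setup(uint16_t port_id, uint16_t nb_rx_queues)
{
	/* Two 64-entry groups -> a 128-entry redirection table */
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	uint16_t reta_size = 2 * RTE_ETH_RETA_GROUP_SIZE;
	uint16_t i, j;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
		reta_conf[i].mask = ~0ULL; /* update every entry in group */
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			reta_conf[i].reta[j] =
				(i * RTE_ETH_RETA_GROUP_SIZE + j) %
				nb_rx_queues; /* nb_rx_queues must be > 0 */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
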
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 25da5f6691d0..f7eb0f437b77 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
diff --git a/drivers/net/cnxk/cn10k_rte_flow.c b/drivers/net/cnxk/cn10k_rte_flow.c
index 8c87452934eb..dff4c7746cf5 100644
--- a/drivers/net/cnxk/cn10k_rte_flow.c
+++ b/drivers/net/cnxk/cn10k_rte_flow.c
@@ -98,7 +98,7 @@ cn10k_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index d6af54b56de6..5d603514c045 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -77,12 +77,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
index eb962ef08cab..5e6c5ee11188 100644
--- a/drivers/net/cnxk/cn10k_tx.c
+++ b/drivers/net/cnxk/cn10k_tx.c
@@ -78,11 +78,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 08c86f9e6b7b..17f8f6debbc8 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -298,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
@@ -553,17 +553,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index 5c4387e74e0b..8d504c4a6d92 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -77,12 +77,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index e5691a2a7e16..f3f19fed9780 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -77,11 +77,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2e05d8bf1552..db54468dbca1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 
 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	uint32_t speed_capa;
 
 	/* Auto negotiation disabled */
-	speed_capa = ETH_LINK_SPEED_FIXED;
+	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
-		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return speed_capa;
@@ -65,7 +65,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 	struct roc_nix *nix = &dev->nix;
 	int i, rc = 0;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup Inline Inbound */
 		rc = roc_nix_inl_inb_init(nix);
 		if (rc) {
@@ -80,8 +80,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 		cnxk_nix_inb_mode_set(dev, true);
 	}
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		struct plt_bitmap *bmap;
 		size_t bmap_sz;
 		void *mem;
@@ -100,8 +100,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 
 		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
 
-		/* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
-		if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+		/* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
 			goto done;
 
 		rc = -ENOMEM;
@@ -136,7 +136,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 done:
 	return 0;
 cleanup:
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		rc |= roc_nix_inl_inb_fini(nix);
 	return rc;
 }
@@ -182,7 +182,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	int rc, ret = 0;
 
 	/* Cleanup Inline inbound */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy inbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -199,8 +199,8 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	}
 
 	/* Cleanup Inline outbound */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy outbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
@@ -242,8 +242,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 }
 
@@ -273,7 +273,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct rte_eth_fc_conf fc_conf = {0};
 	int rc;
 
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -281,10 +281,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
@@ -305,11 +305,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (roc_model_is_cn96_ax() &&
 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
-	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_cfg.mode =
-				(fc_cfg.mode == RTE_FC_FULL ||
-				fc_cfg.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_cfg.mode == RTE_ETH_FC_FULL ||
+				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -352,7 +352,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -380,7 +380,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* When Tx Security offload is enabled, increase tx desc count by
 	 * max possible outbound desc count.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		nb_desc += dev->outb.nb_desc;
 
 	/* Setup ROC SQ */
@@ -499,7 +499,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * to avoid meta packet drop as LBK does not currently support
 	 * backpressure.
 	 */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
 		uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
 
 		/* Use current RQ's aura limit if inl rq is not available */
@@ -561,7 +561,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq_sp->qconf.nb_desc = nb_desc;
 	rxq_sp->qconf.mp = mp;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup rq reference for inline dev if present */
 		rc = roc_nix_inl_dev_rq_get(rq);
 		if (rc)
@@ -579,7 +579,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
 		rc = cnxk_nix_tsc_convert(dev);
 		if (rc) {
 			plt_err("Failed to calculate delta and freq mult");
@@ -618,7 +618,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_nix_dbg("Releasing rxq %u", qid);
 
 	/* Release rq reference for inline dev if present */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		roc_nix_inl_dev_rq_put(rq);
 
 	/* Cleanup ROC RQ */
@@ -657,24 +657,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->ethdev_rss_hf = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -683,34 +683,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -746,7 +746,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
 	uint64_t rss_hf;
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 
@@ -958,8 +958,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
 
 	/* Nothing much to do if offload is not enabled */
 	if (!(dev->tx_offloads &
-	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
 		return 0;
 
 	/* Setup LSO formats in AF. Its a no-op if other ethdev has
@@ -1007,13 +1007,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
@@ -1054,7 +1054,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Prepare rx cfg */
 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
 	}
@@ -1062,7 +1062,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
 		/* Disable drop re if rx offload security is enabled and
 		 * platform does not support it.
@@ -1454,12 +1454,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled on PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
 	else
 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		rc = rte_mbuf_dyn_rx_timestamp_register
 			(&dev->tstamp.tstamp_dynfield_offset,
 			 &dev->tstamp.rx_tstamp_dynflag);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 72f80ae948cf..29a3540ed3f8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -58,41 +58,44 @@
 	 CNXK_NIX_TX_NB_SEG_MAX)
 
 #define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
-	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
-	 ETH_RSS_L4_DST_ONLY)
+	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |                   \
+	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 #define CNXK_NIX_RSS_OFFLOAD                                                   \
-	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
-	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
-	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |                 \
+	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL |             \
+	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST |                 \
+	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
 
 #define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
-	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
-	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
-	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
-	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
-	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
-	 DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
+	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |          \
+	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT |             \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |                 \
+	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |                  \
+	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |        \
+	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS |              \
+	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
-	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
-	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
-	 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH |            \
-	 DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP |                \
-	 DEV_RX_OFFLOAD_SECURITY)
+	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |         \
+	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER |    \
+	 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH |    \
+	 RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP |        \
+	 RTE_ETH_RX_OFFLOAD_SECURITY)
 
 #define RSS_IPV4_ENABLE                                                        \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE                                                        \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
-	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE                                                     \
-	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS 3
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index c0b949e21ab0..e068f553495c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -104,11 +104,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
 		val = ROC_NIX_RSS_RETA_SZ_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
 		val = ROC_NIX_RSS_RETA_SZ_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
 		val = ROC_NIX_RSS_RETA_SZ_256;
 	else
 		val = ROC_NIX_RSS_RETA_SZ_64;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index d0924df76152..67464302653d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -81,24 +81,24 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-		{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
-		{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
-		{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
-		{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
-		{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
-		{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
-		{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
-		{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-		{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-		{DEV_RX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
-		{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
-		{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+		{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
 						 "Scalar, Rx Offloads:"
@@ -142,28 +142,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-		{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-		{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
-		{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
-		{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
-		{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
-		{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
-		{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
-		{DEV_TX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
 						 "Scalar, Tx Offloads:"
@@ -203,8 +203,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	enum rte_eth_fc_mode mode_map[] = {
-					   RTE_FC_NONE, RTE_FC_RX_PAUSE,
-					   RTE_FC_TX_PAUSE, RTE_FC_FULL
+					   RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+					   RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
 					  };
 	struct roc_nix *nix = &dev->nix;
 	int mode;
@@ -264,10 +264,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -408,13 +408,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		plt_err("Scatter offload is not enabled for mtu");
 		goto exit;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
 		plt_err("Greater than maximum supported packet length");
 		goto exit;
@@ -734,8 +734,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -770,8 +770,8 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
 		goto fail;
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = reta[idx];
 			idx++;
@@ -804,7 +804,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 	if (rss_conf->rss_key)
 		roc_nix_rss_key_set(nix, rss_conf->rss_key);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index 6a7080167598..f10a502826c6 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		plt_info("Port %d: Link Up - speed %u Mbps - %s",
 			 (int)(eth_dev->data->port_id),
 			 (uint32_t)link->link_speed,
-			 link->link_duplex == ETH_LINK_FULL_DUPLEX
+			 link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 				 ? "full-duplex"
 				 : "half-duplex");
 	else
@@ -89,7 +89,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
 
 	eth_link.link_status = link->status;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	/* Print link info */
@@ -117,17 +117,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		return 0;
 
 	if (roc_nix_is_lbk(&dev->nix)) {
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_100G;
-		link.link_autoneg = ETH_LINK_FIXED;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		rc = roc_nix_mac_link_info_get(&dev->nix, &info);
 		if (rc)
 			return rc;
 		link.link_status = info.status;
 		link.link_speed = info.speed;
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 		if (info.full_duplex)
 			link.link_duplex = info.full_duplex;
 	}
diff --git a/drivers/net/cnxk/cnxk_ptp.c b/drivers/net/cnxk/cnxk_ptp.c
index 449489f599c4..139fea256ccd 100644
--- a/drivers/net/cnxk/cnxk_ptp.c
+++ b/drivers/net/cnxk/cnxk_ptp.c
@@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 	dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	rc = roc_nix_ptp_rx_ena_dis(nix, true);
 	if (!rc) {
@@ -257,7 +257,7 @@ int
 cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+	uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	struct roc_nix *nix = &dev->nix;
 	int rc = 0;
 
diff --git a/drivers/net/cnxk/cnxk_rte_flow.c b/drivers/net/cnxk/cnxk_rte_flow.c
index dfc33ba8654a..b08d7c34faa9 100644
--- a/drivers/net/cnxk/cnxk_rte_flow.c
+++ b/drivers/net/cnxk/cnxk_rte_flow.c
@@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 37625c5bfb69..dbcbfaf68a30 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -28,31 +28,31 @@
 #define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
 
 #define CXGBE_DEFAULT_RSS_KEY_LEN     40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-				ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
-				ETH_RSS_NONFRAG_IPV6_OTHER | \
-				ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
-				    ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
-				    ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+				RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+				    RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+				    RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
 
 /* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
-			   DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_TX_OFFLOAD_UDP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_TSO | \
-			   DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			   DEV_RX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_RX_OFFLOAD_UDP_CKSUM | \
-			   DEV_RX_OFFLOAD_TCP_CKSUM | \
-			   DEV_RX_OFFLOAD_SCATTER | \
-			   DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+			   RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+			   RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_SCATTER | \
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 /* Devargs filtermode and filtermask representation */
 enum cxgbe_devargs_filter_mode_flags {
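
Note for reviewers: CXGBE_TX_OFFLOADS/CXGBE_RX_OFFLOADS above are again a
mechanical rename. For reference, a minimal runtime check against the renamed
capability bits, mirroring the tests the PMDs do; example_enable_scatter()
and port_id are illustrative assumptions, not code in this patch:

#include <rte_ethdev.h>

static int
example_enable_scatter(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request the offload only when the port advertises it */
	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
	return 0;
}
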
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index f77b2976002c..4758321778d1 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 	}
 
 	new_link.link_status = cxgbe_force_linkup(adapter) ?
-			       ETH_LINK_UP : pi->link_cfg.link_ok;
+			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
 	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
@@ -374,7 +374,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
 			goto out;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 	else
 		eth_dev->data->scattered_rx = 0;
@@ -438,9 +438,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
@@ -1080,13 +1080,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		rx_pause = 1;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1099,12 +1099,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	u8 tx_pause = 0, rx_pause = 0;
 	int ret;
 
-	if (fc_conf->mode == RTE_FC_FULL) {
+	if (fc_conf->mode == RTE_ETH_FC_FULL) {
 		tx_pause = 1;
 		rx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
 		tx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
 		rx_pause = 1;
 	}
 
@@ -1200,9 +1200,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	}
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@@ -1246,8 +1246,8 @@ static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1277,8 +1277,8 @@ static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1479,7 +1479,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_100G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
 		}
@@ -1488,7 +1488,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_50G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
 		}
@@ -1497,7 +1497,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_25G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
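
Note for reviewers: the flow-control hunks above map one-to-one from RTE_FC_*
to RTE_ETH_FC_*. For reference, a minimal get/modify/set sketch with the
renamed enum; example_fc_full() and port_id are illustrative assumptions, not
code in this patch:

#include <rte_ethdev.h>

static int
example_fc_full(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL; /* enable both Rx and Tx pause */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
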
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 91d6bb9bbcb0..f1ac32270961 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1670,7 +1670,7 @@ int cxgbe_link_start(struct port_info *pi)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
-			    !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+			    !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
 			    true);
 	if (ret == 0) {
 		ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@@ -1694,7 +1694,7 @@ int cxgbe_link_start(struct port_info *pi)
 	}
 
 	if (ret == 0 && cxgbe_force_linkup(adapter))
-		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return ret;
 }
 
@@ -1725,10 +1725,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
 	if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
 			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
@@ -1865,7 +1865,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
 {
 #define SET_SPEED(__speed_name) \
 	do { \
-		*speed_caps |= ETH_LINK_ ## __speed_name; \
+		*speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
 	} while (0)
 
 #define FW_CAPS_TO_SPEED(__fw_name) \
@@ -1952,7 +1952,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
 			      speed_caps);
 
 	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
-		*speed_caps |= ETH_LINK_SPEED_FIXED;
+		*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
 }
 
 /**
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index c79cdb8d8ad7..89ea7dd47c0b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -54,29 +54,29 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		DPAA_PMD_DEBUG("enabling scatter mode");
 		fman_if_set_sg(dev->process_private, 1);
 		dev->data->scattered_rx = 1;
@@ -283,43 +283,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	/* Configure link only if link is UP*/
 	if (link->link_status) {
-		if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 			/* Start autoneg only if link is not in autoneg mode */
 			if (!link->link_autoneg)
 				dpaa_restart_link_autoneg(__fif->node_name);
-		} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-			switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
-			case ETH_LINK_SPEED_10M_HD:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+			case RTE_ETH_LINK_SPEED_10M_HD:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10M:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10M:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M_HD:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M_HD:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_1G:
-				speed = ETH_SPEED_NUM_1G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_1G:
+				speed = RTE_ETH_SPEED_NUM_1G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_2_5G:
-				speed = ETH_SPEED_NUM_2_5G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_2_5G:
+				speed = RTE_ETH_SPEED_NUM_2_5G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10G:
-				speed = ETH_SPEED_NUM_10G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10G:
+				speed = RTE_ETH_SPEED_NUM_10G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			default:
-				speed = ETH_SPEED_NUM_NONE;
-				duplex = ETH_LINK_FULL_DUPLEX;
+				speed = RTE_ETH_SPEED_NUM_NONE;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			}
 			/* Set link speed */
@@ -535,30 +535,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G
-					| ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G
+					| RTE_ETH_LINK_SPEED_10G;
 	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, fif->mac_type);
@@ -591,12 +591,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 
 	/* Update Rx offload info */
@@ -623,14 +623,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -664,7 +664,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 			ret = dpaa_get_link_status(__fif->node_name, link);
 			if (ret)
 				return ret;
-			if (link->link_status == ETH_LINK_DOWN &&
+			if (link->link_status == RTE_ETH_LINK_DOWN &&
 			    wait_to_complete)
 				rte_delay_ms(CHECK_INTERVAL);
 			else
@@ -675,15 +675,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	}
 
 	if (ioctl_version < 2) {
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
-		link->link_autoneg = ETH_LINK_AUTONEG;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 		if (fif->mac_type == fman_mac_1g)
-			link->link_speed = ETH_SPEED_NUM_1G;
+			link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		else if (fif->mac_type == fman_mac_2_5g)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else if (fif->mac_type == fman_mac_10g)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
 			DPAA_PMD_ERR("invalid link_speed: %s, %d",
 				     dpaa_intf->name, fif->mac_type);
@@ -962,7 +962,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (max_rx_pktlen <= buffsz) {
 		;
 	} else if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SCATTER) {
+			RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
 			DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
 				"MaxSGlist %d",
@@ -1268,7 +1268,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
 	else
 		return dpaa_eth_dev_stop(dev);
 	return 0;
@@ -1284,7 +1284,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
 	else
 		dpaa_eth_dev_start(dev);
 	return 0;
@@ -1314,10 +1314,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == RTE_FC_NONE) {
+	if (fc_conf->mode == RTE_ETH_FC_NONE) {
 		return 0;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-		 fc_conf->mode == RTE_FC_FULL) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+		 fc_conf->mode == RTE_ETH_FC_FULL) {
 		fman_if_set_fc_threshold(dev->process_private,
 					 fc_conf->high_water,
 					 fc_conf->low_water,
@@ -1361,11 +1361,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 	}
 	ret = fman_if_get_fc_threshold(dev->process_private);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time =
 			fman_if_get_fc_quanta(dev->process_private);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1626,10 +1626,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
 	fc_conf = dpaa_intf->fc_conf;
 	ret = fman_if_get_fc_threshold(fman_intf);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
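
For the fixed-speed path above, the application side looks like this
minimal sketch (queue counts and helper name are illustrative),
requesting a fixed 1G full-duplex link with the new names:

	#include <string.h>
	#include <rte_ethdev.h>

	static int
	configure_fixed_1g(uint16_t port_id)
	{
		struct rte_eth_conf conf;

		memset(&conf, 0, sizeof(conf));
		/* RTE_ETH_LINK_SPEED_FIXED disables autoneg; the remaining
		 * bits select the speed, here 1G (full duplex).
		 */
		conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
				   RTE_ETH_LINK_SPEED_1G;

		return rte_eth_dev_configure(port_id, 1, 1, &conf);
	}
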
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b5728e09c29f..c868e9d5bd9b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -74,11 +74,11 @@
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
 #define DPAA_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP)
 
 #define DPAA_TX_CKSUM_OFFLOAD_MASK (             \
 		PKT_TX_IP_CKSUM |                \
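
Applications typically intersect a wanted hash mask with the capability
mask the PMD reports (DPAA_RSS_OFFLOAD_ALL above ends up in
dev_info->flow_type_rss_offloads). A sketch, with an illustrative
helper name:

	#include <rte_ethdev.h>

	static int
	request_rss(uint16_t port_id, struct rte_eth_conf *conf,
		    uint64_t wanted)
	{
		struct rte_eth_dev_info dev_info;
		int ret;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		conf->rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
		/* Keep only the hash types the port supports. */
		conf->rx_adv_conf.rss_conf.rss_hf =
			wanted & dev_info.flow_type_rss_offloads;
		return 0;
	}

e.g. wanted = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP matches
the driver mask above.
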
diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
index c5b5ec869519..1ccd03602790 100644
--- a/drivers/net/dpaa/dpaa_flow.c
+++ b/drivers/net/dpaa/dpaa_flow.c
@@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1U << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_ETH;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
 
 				if (ipv4_configured)
 					break;
@@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV4;
 				break;
 
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (ipv6_configured)
 					break;
@@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV6;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
 
 				if (tcp_configured)
 					break;
@@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_TCP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (udp_configured)
 					break;
@@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_UDP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
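
The loop above walks req_dist_set one bit at a time; the same walk can
be written with a lowest-set-bit idiom. A sketch (the printf bodies are
placeholders for real per-bit handling):

	#include <inttypes.h>
	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	walk_rss_bits(uint64_t rss_hf)
	{
		while (rss_hf != 0) {
			uint64_t bit = rss_hf & ~(rss_hf - 1); /* lowest set bit */

			switch (bit) {
			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
				printf("IPv4/TCP hashing requested\n");
				break;
			default:
				printf("other bit 0x%" PRIx64 "\n", bit);
				break;
			}
			rss_hf &= ~bit;
		}
	}
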
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 08f49af7685d..3170694841df 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -220,9 +220,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1ULL << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
-			case ETH_RSS_ETH:
-
+			case RTE_ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_ETH:
 				if (l2_configured)
 					break;
 				l2_configured = 1;
@@ -238,7 +237,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_PPPOE:
+			case RTE_ETH_RSS_PPPOE:
 				if (pppoe_configured)
 					break;
 				kg_cfg->extracts[i].extract.from_hdr.prot =
@@ -252,7 +251,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_ESP:
+			case RTE_ETH_RSS_ESP:
 				if (esp_configured)
 					break;
 				esp_configured = 1;
@@ -268,7 +267,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_AH:
+			case RTE_ETH_RSS_AH:
 				if (ah_configured)
 					break;
 				ah_configured = 1;
@@ -284,8 +283,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_C_VLAN:
-			case ETH_RSS_S_VLAN:
+			case RTE_ETH_RSS_C_VLAN:
+			case RTE_ETH_RSS_S_VLAN:
 				if (vlan_configured)
 					break;
 				vlan_configured = 1;
@@ -301,7 +300,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_MPLS:
+			case RTE_ETH_RSS_MPLS:
 
 				if (mpls_configured)
 					break;
@@ -338,13 +337,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (l3_configured)
 					break;
@@ -382,12 +381,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_TCP_EX:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (l4_configured)
 					break;
@@ -414,8 +413,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a0270e78520e..59e728577f53 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -38,33 +38,33 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_CHECKSUM |
-		DEV_RX_OFFLOAD_SCTP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_TIMESTAMP;
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_RSS_HASH |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -142,7 +142,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN Filter not avaialble */
 		if (!priv->max_vlan_filters) {
 			DPAA2_PMD_INFO("VLAN filter not available");
@@ -150,7 +150,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 
 		if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
 		else
@@ -251,13 +251,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 					dev_rx_offloads_nodis;
 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
 					dev_tx_offloads_nodis;
-	dev_info->speed_capa = ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -270,10 +270,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
 	if (dpaa2_svr_family == SVR_LX2160A) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return 0;
@@ -291,15 +291,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
 	};
 
 	/* Update Rx offload info */
@@ -326,15 +326,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -573,7 +573,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		return -1;
 	}
 
-	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
 			ret = dpaa2_setup_flow_dist(dev,
 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -587,12 +587,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rx_l3_csum_offload = true;
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
 		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -610,7 +610,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
 	{
 		ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -623,12 +623,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		dpaa2_enable_ts[dev->data->port_id] = true;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		tx_l3_csum_offload = true;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
 		tx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -660,8 +660,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
 	dpaa2_tm_init(dev);
 
@@ -1856,7 +1856,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
 			return -1;
 		}
-		if (state.up == ETH_LINK_DOWN &&
+		if (state.up == RTE_ETH_LINK_DOWN &&
 		    wait_to_complete)
 			rte_delay_ms(CHECK_INTERVAL);
 		else
@@ -1868,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 	link.link_speed = state.rate;
 
 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
@@ -2031,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	No TX side flow control (send Pause frame disabled)
 		 */
 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_RX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	} else {
 		/* DPNI_LINK_OPT_PAUSE not set
 		 *  if ASYM_PAUSE set,
@@ -2043,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	Flow control disabled
 		 */
 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return ret;
@@ -2089,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	/* update cfg with fc_conf */
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		/* Full flow control;
 		 * OPT_PAUSE set, ASYM_PAUSE not set
 		 */
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		/* Enable RX flow control
 		 * OPT_PAUSE not set;
 		 * ASYM_PAUSE set;
@@ -2104,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		/* Enable TX Flow control
 		 * OPT_PAUSE set
 		 * ASYM_PAUSE set
@@ -2112,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		/* Disable Flow control
 		 * OPT_PAUSE not set
 		 * ASYM_PAUSE not set
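
The get/set pairs above all encode the same two-bit truth table between
pause directions and rte_eth_fc_mode. As a reference, a minimal sketch
of that mapping (helper name illustrative):

	#include <rte_ethdev.h>

	static enum rte_eth_fc_mode
	fc_mode_from_pause(int rx_pause, int tx_pause)
	{
		if (rx_pause && tx_pause)
			return RTE_ETH_FC_FULL;
		if (rx_pause)
			return RTE_ETH_FC_RX_PAUSE;
		if (tx_pause)
			return RTE_ETH_FC_TX_PAUSE;
		return RTE_ETH_FC_NONE;
	}
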
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index fdc62ec30d22..c5e9267bf04d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -65,17 +65,17 @@
 #define DPAA2_TX_CONF_ENABLE	0x08
 
 #define DPAA2_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP | \
-	ETH_RSS_MPLS | \
-	ETH_RSS_C_VLAN | \
-	ETH_RSS_S_VLAN | \
-	ETH_RSS_ESP | \
-	ETH_RSS_AH | \
-	ETH_RSS_PPPOE)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP | \
+	RTE_ETH_RSS_MPLS | \
+	RTE_ETH_RSS_C_VLAN | \
+	RTE_ETH_RSS_S_VLAN | \
+	RTE_ETH_RSS_ESP | \
+	RTE_ETH_RSS_AH | \
+	RTE_ETH_RSS_PPPOE)
 
 /* LX2 FRC Parsed values (Little Endian) */
 #define DPAA2_PKT_TYPE_ETHER		0x0060
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index f40369e2c3f9..7c77243b5d1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -773,7 +773,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP)
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
@@ -987,7 +987,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 							eth_data->port_id);
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP) {
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			rte_vlan_strip(bufs[num_rx]);
 		}
 
@@ -1230,7 +1230,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					if (unlikely(((*bufs)->ol_flags
 						& PKT_TX_VLAN_PKT) ||
 						(eth_data->dev_conf.txmode.offloads
-						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -1273,7 +1273,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
 				(eth_data->dev_conf.txmode.offloads
-				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 				int ret = rte_vlan_insert(bufs);
 				if (ret)
 					goto send_n_return;
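
The Rx/Tx paths above fall back to software VLAN handling when the
offload bit is set; a sketch of the Rx side of that pattern (helper
name illustrative):

	#include <rte_ethdev.h>
	#include <rte_ether.h>
	#include <rte_mbuf.h>

	static void
	sw_vlan_strip_burst(uint64_t rx_offloads, struct rte_mbuf **pkts,
			    uint16_t nb_pkts)
	{
		uint16_t i;

		if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP))
			return;
		/* Move the tag from the frame into mbuf->vlan_tci. */
		for (i = 0; i < nb_pkts; i++)
			rte_vlan_strip(pkts[i]);
	}
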
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 7d5d6377859a..a548ae2ccb2c 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -82,15 +82,15 @@
 #define E1000_FTQF_QUEUE_ENABLE          0x00000100
 
 #define IGB_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 /*
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 73152dec6ed1..9da477e59def 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -597,8 +597,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	e1000_clear_hw_cntrs_base_generic(hw);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_em_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -611,39 +611,39 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -1102,9 +1102,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
@@ -1162,17 +1162,17 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -1424,15 +1424,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1601,7 +1601,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
-			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
@@ -1683,13 +1683,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
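
The link reporting above translates directly to the application view; a
minimal sketch using the renamed link constants (port assumed started,
error handling trimmed):

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	print_link(uint16_t port_id)
	{
		struct rte_eth_link link;

		if (rte_eth_link_get_nowait(port_id, &link) != 0)
			return;

		if (link.link_status == RTE_ETH_LINK_DOWN) {
			printf("port %u: link down\n", port_id);
			return;
		}
		printf("port %u: %u Mbps, %s-duplex, %s\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full" : "half",
		       link.link_autoneg == RTE_ETH_LINK_AUTONEG ?
				"autoneg" : "fixed");
	}
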
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 344149c19147..648b04154c5b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -93,7 +93,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -173,7 +173,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1171,11 +1171,11 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	RTE_SET_USED(dev);
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	return tx_offload_capa;
 }
@@ -1369,13 +1369,13 @@ em_get_rx_port_offloads_capa(void)
 	uint64_t rx_offload_capa;
 
 	rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP  |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM   |
-		DEV_RX_OFFLOAD_KEEP_CRC    |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return rx_offload_capa;
 }
@@ -1469,7 +1469,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1788,7 +1788,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -1831,7 +1831,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1844,7 +1844,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1870,7 +1870,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
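
The crc_len handling above follows a single rule, stated here as a tiny
sketch for clarity:

	#include <rte_ethdev.h>
	#include <rte_ether.h>

	/* Frames delivered to the application carry the 4-byte FCS only
	 * when RTE_ETH_RX_OFFLOAD_KEEP_CRC is enabled.
	 */
	static uint8_t
	rx_crc_len(uint64_t rx_offloads)
	{
		return (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
				RTE_ETHER_CRC_LEN : 0;
	}
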
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index dbe811a1ad2f..ae3bc4a9c201 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1073,21 +1073,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
-	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
-	    tx_mq_mode == ETH_MQ_TX_DCB ||
-	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* Check multi-queue mode.
-		 * To no break software we accept ETH_MQ_RX_NONE as this might
+		 * To not break software we accept RTE_ETH_MQ_RX_NONE as this might
 		 * be used to turn off VLAN filter.
 		 */
 
-		if (rx_mq_mode == ETH_MQ_RX_NONE ||
-		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+		if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+		    rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 		} else {
 			/* Only support one queue on VFs.
@@ -1099,12 +1099,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 		/* TX mode is not used here, so mode might be ignored.*/
-		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(WARNING, "SRIOV is active,"
 					" TX mode %d is not supported. "
 					" Driver will behave as %d mode.",
-					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+					tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
 		}
 
 		/* check valid queue number */
@@ -1117,17 +1117,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 		/* To no break software that set invalid mode, only display
 		 * warning if invalid mode is used.
 		 */
-		if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
-		    rx_mq_mode != ETH_MQ_RX_RSS) {
+		if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 			/* RSS together with VMDq not supported*/
 			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				     rx_mq_mode);
 			return -EINVAL;
 		}
 
-		if (tx_mq_mode != ETH_MQ_TX_NONE &&
-		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+		    tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
 					" Due to txmode is meaningless in this"
 					" driver, just ignore.",
@@ -1146,8 +1146,8 @@ eth_igb_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multipe queue mode checking */
 	ret  = igb_check_mq_mode(dev);
@@ -1287,8 +1287,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/*
 	 * VLAN Offload Settings
 	 */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_igb_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
@@ -1296,7 +1296,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable VLAN filter since VMDq always use VLAN filter */
 		igb_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1310,39 +1310,39 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -2185,21 +2185,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_82576:
 		dev_info->max_rx_queues = 16;
 		dev_info->max_tx_queues = 16;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 16;
 		break;
 
 	case e1000_82580:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
 	case e1000_i350:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
@@ -2225,7 +2225,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		return -EINVAL;
 	}
 	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2251,9 +2251,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2296,12 +2296,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	switch (hw->mac.type) {
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
@@ -2402,17 +2402,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else if (!link_check) {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2588,7 +2588,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
 	/* only outer TPID of double VLAN can be configured*/
-	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg = E1000_READ_REG(hw, E1000_VET);
 		reg = (reg & (~E1000_VET_VET_EXT)) |
 			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
@@ -2703,22 +2703,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -2870,7 +2870,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
 				     " Port %d: Link Up - speed %u Mbps - %s",
 				     dev->data->port_id,
 				     (unsigned)link.link_speed,
-				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				     "full-duplex" : "half-duplex");
 		} else {
 			PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3024,13 +3024,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3099,18 +3099,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 * on configuration
 		 */
 		switch (fc_conf->mode) {
-		case RTE_FC_NONE:
+		case RTE_ETH_FC_NONE:
 			ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_RX_PAUSE:
+		case RTE_ETH_FC_RX_PAUSE:
 			ctrl |= E1000_CTRL_RFCE;
 			ctrl &= ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_TX_PAUSE:
+		case RTE_ETH_FC_TX_PAUSE:
 			ctrl |= E1000_CTRL_TFCE;
 			ctrl &= ~E1000_CTRL_RFCE;
 			break;
-		case RTE_FC_FULL:
+		case RTE_ETH_FC_FULL:
 			ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
 			break;
 		default:
@@ -3258,22 +3258,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -3571,16 +3571,16 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
@@ -3612,16 +3612,16 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
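
The idx/shift arithmetic above is the same on the application side when
filling the table. A sketch programming an identity RETA (assumes
reta_size <= 512 and a valid nb_queues; helper name illustrative):

	#include <stdint.h>
	#include <string.h>
	#include <rte_ethdev.h>

	static int
	set_identity_reta(uint16_t port_id, uint16_t reta_size,
			  uint16_t nb_queues)
	{
		struct rte_eth_rss_reta_entry64
			reta[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta, 0, sizeof(reta));
		for (i = 0; i < reta_size; i++) {
			uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

			reta[idx].mask |= UINT64_C(1) << shift;
			reta[idx].reta[shift] = i % nb_queues;
		}
		return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
	}
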
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 2ce74dd5a9a5..fe355ef6b3b5 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -88,7 +88,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
-	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+	RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a1d5eecc14a1..bcce2fc726d8 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -111,7 +111,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -186,7 +186,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1459,13 +1459,13 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	RTE_SET_USED(dev);
-	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
-			  DEV_TX_OFFLOAD_TCP_TSO     |
-			  DEV_TX_OFFLOAD_MULTI_SEGS;
+	tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return tx_offload_capa;
 }
@@ -1640,19 +1640,19 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
-			  DEV_RX_OFFLOAD_VLAN_FILTER |
-			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_RX_OFFLOAD_UDP_CKSUM   |
-			  DEV_RX_OFFLOAD_TCP_CKSUM   |
-			  DEV_RX_OFFLOAD_KEEP_CRC    |
-			  DEV_RX_OFFLOAD_SCATTER     |
-			  DEV_RX_OFFLOAD_RSS_HASH;
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+			  RTE_ETH_RX_OFFLOAD_SCATTER     |
+			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == e1000_i350 ||
 	    hw->mac.type == e1000_i210 ||
 	    hw->mac.type == e1000_i211)
-		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	return rx_offload_capa;
 }
@@ -1733,7 +1733,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1950,23 +1950,23 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
@@ -2032,23 +2032,23 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -2170,15 +2170,15 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
 			E1000_VMOLR_MPME);
 
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 			vmolr |= E1000_VMOLR_AUPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 			vmolr |= E1000_VMOLR_ROMPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 			vmolr |= E1000_VMOLR_ROPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 			vmolr |= E1000_VMOLR_BAM;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 			vmolr |= E1000_VMOLR_MPME;
 
 		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
@@ -2214,9 +2214,9 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	/* VLVF: set up filters for vlan tags as configured */
 	for (i = 0; i < cfg->nb_pool_maps; i++) {
 		/* set vlan id in VF register and set the valid bit */
-		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
-                        (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
-			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+			(cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
+			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
 			E1000_VLVF_POOLSEL_MASK)));
 	}
 
@@ -2268,7 +2268,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t mrqc;
 
-	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+	if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
 		/*
 		 * SRIOV active scheme
 		 * FIXME if support RSS together with VMDq & SRIOV
@@ -2282,14 +2282,14 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-			case ETH_MQ_RX_RSS:
+			case RTE_ETH_MQ_RX_RSS:
 				igb_rss_configure(dev);
 				break;
-			case ETH_MQ_RX_VMDQ_ONLY:
+			case RTE_ETH_MQ_RX_VMDQ_ONLY:
 				/*Configure general VMDQ only RX parameters*/
 				igb_vmdq_rx_hw_configure(dev);
 				break;
-			case ETH_MQ_RX_NONE:
+			case RTE_ETH_MQ_RX_NONE:
 				/* if mq_mode is none, disable rss mode.*/
 			default:
 				igb_rss_disable(dev);
@@ -2338,7 +2338,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Set maximum packet length by default, and might be updated
 		 * together with enabling/disabling dual VLAN.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			max_len += VLAN_TAG_SIZE;
 
 		E1000_WRITE_REG(hw, E1000_RLPML, max_len);
@@ -2374,7 +2374,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -2444,7 +2444,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2488,16 +2488,16 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
 	if (rxmode->offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		rxcsum |= E1000_RXCSUM_TUOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_CRCOFL;
@@ -2505,7 +2505,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
 		/* clear STRCRC bit in all queues */
@@ -2545,7 +2545,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/* Make sure VLAN Filters are off. */
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
 		rctl &= ~E1000_RCTL_VFE;
 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
@@ -2743,7 +2743,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
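
The KEEP_CRC hunks above repeat one pattern: the per-queue CRC length
is derived from the Rx offload bits. For reference, a minimal
standalone sketch of that pattern under the new names (illustrative
only, not part of this patch):

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Keep the 4-byte FCS in the buffer only when the application
 * configured RTE_ETH_RX_OFFLOAD_KEEP_CRC, as the igb paths above do.
 */
static inline uint8_t
crc_len_from_offloads(uint64_t rx_offloads)
{
	return (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
			RTE_ETHER_CRC_LEN : 0;
}
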
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f3b17d70c9a4..4d2601d15a57 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -117,10 +117,10 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-			DEV_TX_OFFLOAD_UDP_CKSUM |\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
 		       PKT_TX_IP_CKSUM |\
 		       PKT_TX_TCP_SEG)
@@ -332,7 +332,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
 		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
@@ -340,7 +340,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L3 checksum is needed */
 		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -357,12 +357,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L4 checksum is needed */
 		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_UDP_CKSUM) &&
-				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else {
@@ -643,9 +643,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
-	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_NONE;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return 0;
 }
@@ -923,7 +923,7 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
@@ -2004,9 +2004,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Scattered Rx cannot be turned off in the HW, so this capability must
 	 * be forced.
@@ -2067,17 +2067,17 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if (adapter->offloads.rx_offloads &
 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
 		port_offloads |=
-			DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-	port_offloads |= DEV_RX_OFFLOAD_SCATTER;
+	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return port_offloads;
 }
@@ -2087,17 +2087,17 @@ static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-		port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 	if (adapter->offloads.tx_offloads &
 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
 		port_offloads |=
-			DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
-	port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return port_offloads;
 }
@@ -2130,14 +2130,14 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
-			ETH_LINK_SPEED_1G   |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_5G   |
-			ETH_LINK_SPEED_10G  |
-			ETH_LINK_SPEED_25G  |
-			ETH_LINK_SPEED_40G  |
-			ETH_LINK_SPEED_50G  |
-			ETH_LINK_SPEED_100G;
+			RTE_ETH_LINK_SPEED_1G   |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_5G   |
+			RTE_ETH_LINK_SPEED_10G  |
+			RTE_ETH_LINK_SPEED_25G  |
+			RTE_ETH_LINK_SPEED_40G  |
+			RTE_ETH_LINK_SPEED_50G  |
+			RTE_ETH_LINK_SPEED_100G;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
@@ -2303,7 +2303,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
@@ -2416,11 +2416,11 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		/* Check if requested offload is also enabled for the queue */
 		if ((ol_flags & PKT_TX_IP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
 		    (l4_csum_flag == PKT_TX_TCP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
 		    (l4_csum_flag == PKT_TX_UDP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
 			PMD_TX_LOG(DEBUG,
 				"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
 				i, m->nb_segs, tx_ring->id);
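
The ena link-update hunk shows how mechanical the rename is for the
link constants. A hedged standalone sketch of the same fill ('up' is
a hypothetical flag, not an ena field name):

#include <stdbool.h>
#include <rte_ethdev.h>

static void
fill_link(struct rte_eth_link *link, bool up)
{
	/* Same constants the driver now uses, RTE_ETH_ prefixed. */
	link->link_status = up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link->link_speed  = RTE_ETH_SPEED_NUM_NONE;
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
}
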
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 4f4142ed12d0..865e1241e0ce 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -58,8 +58,8 @@
 
 #define ENA_HASH_KEY_SIZE		40
 
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define ENA_IO_TXQ_IDX(q)		(2 * (q))
 #define ENA_IO_RXQ_IDX(q)		(2 * (q) + 1)
diff --git a/drivers/net/ena/ena_rss.c b/drivers/net/ena/ena_rss.c
index 152098410fa2..be4007e3f3fe 100644
--- a/drivers/net/ena/ena_rss.c
+++ b/drivers/net/ena/ena_rss.c
@@ -76,7 +76,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -93,8 +93,8 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 		/* Each reta_conf is for 64 entries.
 		 * To support 128 we use 2 conf of 64.
 		 */
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
@@ -139,7 +139,7 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -154,8 +154,8 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
-		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		reta_conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
 			reta_conf[reta_conf_idx].reta[reta_idx] =
 				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
@@ -199,34 +199,34 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Convert proto to ETH flag */
 	switch (proto) {
 	case ENA_ADMIN_RSS_TCP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		break;
 	case ENA_ADMIN_RSS_TCP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 		break;
 	case ENA_ADMIN_RSS_IP4:
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 		break;
 	case ENA_ADMIN_RSS_IP6:
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 		break;
 	case ENA_ADMIN_RSS_IP4_FRAG:
-		rss_hf |= ETH_RSS_FRAG_IPV4;
+		rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
 		break;
 	case ENA_ADMIN_RSS_NOT_IP:
-		rss_hf |= ETH_RSS_L2_PAYLOAD;
+		rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
 		break;
 	case ENA_ADMIN_RSS_TCP6_EX:
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 		break;
 	case ENA_ADMIN_RSS_IP6_EX:
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 		break;
 	default:
 		break;
@@ -235,10 +235,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L3. */
 	switch (fields & ENA_HF_RSS_ALL_L3) {
 	case ENA_ADMIN_RSS_L3_SA:
-		rss_hf |= ETH_RSS_L3_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L3_DA:
-		rss_hf |= ETH_RSS_L3_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
 		break;
 	default:
 		break;
@@ -247,10 +247,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L4. */
 	switch (fields & ENA_HF_RSS_ALL_L4) {
 	case ENA_ADMIN_RSS_L4_SP:
-		rss_hf |= ETH_RSS_L4_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L4_DP:
-		rss_hf |= ETH_RSS_L4_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
 		break;
 	default:
 		break;
@@ -268,11 +268,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
 	/* Determine which fields of L3 should be used. */
-	switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
-	case ETH_RSS_L3_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+	case RTE_ETH_RSS_L3_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_DA;
 		break;
-	case ETH_RSS_L3_SRC_ONLY:
+	case RTE_ETH_RSS_L3_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_SA;
 		break;
 	default:
@@ -284,11 +284,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	}
 
 	/* Determine which fields of L4 should be used. */
-	switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
-	case ETH_RSS_L4_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+	case RTE_ETH_RSS_L4_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_DP;
 		break;
-	case ETH_RSS_L4_SRC_ONLY:
+	case RTE_ETH_RSS_L4_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_SP;
 		break;
 	default:
@@ -334,43 +334,43 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
 	int rc, i;
 
 	/* Turn on appropriate fields for each requested packet type */
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
 
-	if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+	if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
 		selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
 
@@ -541,7 +541,7 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint16_t admin_hf;
 	static bool warn_once;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
 		return -ENOTSUP;
 	}
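
All of the RETA hunks above share the layout the ena comment spells
out: each rte_eth_rss_reta_entry64 element covers 64 entries. A
standalone sketch of the index math with the renamed group-size macro
(illustrative only):

#include <stdint.h>
#include <rte_ethdev.h>

/* Entry i lives in element i / RTE_ETH_RETA_GROUP_SIZE; its bit in
 * that element's mask and its slot in reta[] are i modulo the same.
 */
static inline int
reta_lookup(const struct rte_eth_rss_reta_entry64 *conf, uint16_t i,
	    uint16_t *queue)
{
	uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
	uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

	if ((conf[idx].mask & (1ULL << shift)) == 0)
		return -1; /* entry not selected by the caller */
	*queue = conf[idx].reta[shift];
	return 0;
}
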
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 1b567f01eae0..7cdb8ce463ed 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -100,27 +100,27 @@ enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
 
 	if (status & ENETC_LINK_MODE)
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 
 	if (status & ENETC_LINK_STATUS)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	switch (status & ENETC_LINK_SPEED_MASK) {
 	case ENETC_LINK_SPEED_1G:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case ENETC_LINK_SPEED_100M:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	default:
 	case ENETC_LINK_SPEED_10M:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -207,10 +207,10 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	dev_info->max_tx_queues = MAX_TX_RINGS;
 	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
 	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		 DEV_RX_OFFLOAD_UDP_CKSUM |
-		 DEV_RX_OFFLOAD_TCP_CKSUM |
-		 DEV_RX_OFFLOAD_KEEP_CRC);
+		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_KEEP_CRC);
 
 	return 0;
 }
@@ -463,7 +463,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
 			       RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
-	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				     RTE_ETHER_CRC_LEN : 0);
 
 	return 0;
@@ -705,7 +705,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
 	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		int config;
 
 		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
@@ -713,10 +713,10 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		checksum &= ~L3_CKSUM;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		checksum &= ~L4_CKSUM;
 
 	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
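
The enetc link hunk maps device status bits onto the renamed speed
and duplex constants. A standalone sketch of the same shape (the
STATUS_* masks below are placeholders, not the real ENETC register
fields):

#include <stdint.h>
#include <rte_ethdev.h>

#define STATUS_SPEED_1G		0x1u /* placeholder mask */
#define STATUS_SPEED_100M	0x2u /* placeholder mask */

static uint32_t
speed_from_status(uint32_t status)
{
	if (status & STATUS_SPEED_1G)
		return RTE_ETH_SPEED_NUM_1G;
	if (status & STATUS_SPEED_100M)
		return RTE_ETH_SPEED_NUM_100M;
	return RTE_ETH_SPEED_NUM_10M; /* default, as in the driver */
}
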
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 47bfdac2cfdd..d5493c98345d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -178,7 +178,7 @@ struct enic {
 	 */
 	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
 	uint8_t rss_enable;
-	uint64_t rss_hf; /* ETH_RSS flags */
+	uint64_t rss_hf; /* RTE_ETH_RSS flags */
 	union vnic_rss_key rss_key;
 	union vnic_rss_cpu rss_cpu;
 
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 8df7332bc5e0..c8bdaf1a8e79 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,30 +38,30 @@ static const struct vic_speed_capa {
 	uint16_t sub_devid;
 	uint32_t capa;
 } vic_speed_capa_map[] = {
-	{ 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-	{ 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-	{ 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-	{ 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-	{ 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-	{ 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-	{ 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-	{ 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-	{ 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-	{ 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-	{ 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-	{ 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-	{ 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-	{ 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-	{ 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-	{ 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-	{ 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-	{ 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-	{ 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-	{ 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-	{ 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-	{ 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
 	{ 0, 0 }, /* End marker */
 };
 
@@ -297,8 +297,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	ENICPMD_FUNC_TRACE();
 
 	offloads = eth_dev->data->dev_conf.rxmode.offloads;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			enic->ig_vlan_strip_en = 1;
 		else
 			enic->ig_vlan_strip_en = 0;
@@ -323,17 +323,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	enic->mc_count = 0;
 	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-				  DEV_RX_OFFLOAD_CHECKSUM);
+				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* All vlan offload masks to apply the current settings */
-	mask = ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = enicpmd_vlan_offload_set(eth_dev, mask);
 	if (ret) {
 		dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -435,14 +435,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
 	}
 	/* 1300 and later models are at least 40G */
 	if (id >= 0x0100)
-		return ETH_LINK_SPEED_40G;
+		return RTE_ETH_LINK_SPEED_40G;
 	/* VFs have subsystem id 0, check device id */
 	if (id == 0) {
 		/* Newer VF implies at least 40G model */
 		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-			return ETH_LINK_SPEED_40G;
+			return RTE_ETH_LINK_SPEED_40G;
 	}
-	return ETH_LINK_SPEED_10G;
+	return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -774,8 +774,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
 				enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -806,8 +806,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
 	 */
 	rss_cpu = enic->rss_cpu;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			rss_cpu.cpu[i / 4].b[i % 4] =
 				enic_rte_rq_idx_to_sop_idx(
@@ -883,7 +883,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 	 */
 	conf->offloads = enic->rx_offload_capa;
 	if (!enic->ig_vlan_strip_en)
-		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -969,8 +969,8 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
 				   struct rte_eth_udp_tunnel *tnl)
 {
-	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
-	    tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
 		return -ENOTSUP;
 	if (!enic->overlay_offload) {
 		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
@@ -1010,7 +1010,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
@@ -1039,7 +1039,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index dfc7f5d1f94f..21b1fffb14f0 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
 
 	memset(&link, 0, sizeof(link));
 	link.link_status = enic_get_link_status(enic);
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = vnic_dev_port_speed(enic->vdev);
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
 	}
 
 	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* vnic notification of link status has already been turned on in
 	 * enic_dev_init() which is called during probe time.  Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
 	 * and vlan insertion are supported.
 	 */
 	simple_tx_offloads = enic->tx_offload_capa &
-		(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_VLAN_INSERT |
-		 DEV_TX_OFFLOAD_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_UDP_CKSUM |
-		 DEV_TX_OFFLOAD_TCP_CKSUM);
+		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if ((eth_dev->data->dev_conf.txmode.offloads &
 	     ~simple_tx_offloads) == 0) {
 		ENICPMD_LOG(DEBUG, " use the simple tx handler");
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
 
 	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_SCATTER) {
+	    RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((max pkt len)/mbuf_size) */
 		mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
@@ -1385,15 +1385,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 	rss_hash_type = 0;
 	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
 	if (enic->rq_count > 1 &&
-	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+	    (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
 	    rss_hf != 0) {
 		rss_enable = 1;
-		if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			      ETH_RSS_NONFRAG_IPV4_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
 			if (enic->udp_rss_weak) {
 				/*
@@ -1404,12 +1404,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
 			}
 		}
-		if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
-			      ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+			      RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
 			if (enic->udp_rss_weak)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -1745,9 +1745,9 @@ enic_enable_overlay_offload(struct enic *enic)
 		return -EINVAL;
 	}
 	enic->tx_offload_capa |=
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		(enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
-		(enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+		(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
 	enic->tx_offload_mask |=
 		PKT_TX_OUTER_IPV6 |
 		PKT_TX_OUTER_IPV4 |
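
The enic_enable() hunk keeps its logic intact: the simple Tx handler
is usable only when the configured Tx offloads are a subset of a
small capability set. A standalone sketch of that subset test with
the renamed flags (function name is illustrative):

#include <stdbool.h>
#include <rte_ethdev.h>

static bool
can_use_simple_tx(uint64_t tx_capa, uint64_t configured)
{
	uint64_t simple = tx_capa &
		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);

	/* Nothing outside the simple-capable set may be enabled. */
	return (configured & ~simple) == 0;
}
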
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index c5777772a09e..918a9e170ff6 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -147,31 +147,31 @@ int enic_get_vnic_config(struct enic *enic)
 		 * IPV4 hash type handles both non-frag and frag packet types.
 		 * TCP/UDP is controlled via a separate flag below.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
-			ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+			RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (ENIC_SETTING(enic, RSSHASH_IPV6))
 		/*
 		 * The VIC adapter can perform RSS on IPv6 packets with and
 		 * without extension headers. An IPv6 "fragment" is an IPv6
 		 * packet with the fragment extension header.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
-			ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+			RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
-			ETH_RSS_IPV6_TCP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_IPV6_TCP_EX;
 	if (enic->udp_rss_weak)
 		enic->flow_type_rss_offloads |=
-			ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 
 	/* Zero offloads if RSS is not enabled */
 	if (!ENIC_SETTING(enic, RSS))
@@ -201,19 +201,19 @@ int enic_get_vnic_config(struct enic *enic)
 	enic->tx_queue_offload_capa = 0;
 	enic->tx_offload_capa =
 		enic->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	enic->rx_offload_capa =
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	enic->tx_offload_mask =
 		PKT_TX_IPV6 |
 		PKT_TX_IPV4 |
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index b87c036e6014..82d595b1d1a0 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -17,10 +17,10 @@
 
 const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
 static const struct rte_eth_link eth_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_UP,
-	.link_autoneg = ETH_LINK_AUTONEG,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_UP,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG,
 };
 
 static int
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 602c04033c18..5f4810051dac 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -326,7 +326,7 @@ int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
 	int qid;
 	struct rte_eth_dev *fsdev;
 	struct rxq **rxq;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 				&ETH(sdev)->data->dev_conf.intr_conf;
 
 	fsdev = fs_dev(sdev);
@@ -519,7 +519,7 @@ int
 failsafe_rx_intr_install(struct rte_eth_dev *dev)
 {
 	struct fs_priv *priv = PRIV(dev);
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 			&priv->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
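
Note the failsafe hunks also pick up a struct rename: rte_intr_conf
becomes rte_eth_intr_conf, fields unchanged. A minimal usage sketch
(assuming only what the renamed ethdev header provides):

#include <stdbool.h>
#include <rte_ethdev.h>

/* The interrupt configuration is still reached through dev_conf;
 * only the type name gains the eth infix.
 */
static bool
rxq_intr_requested(const struct rte_eth_conf *conf)
{
	const struct rte_eth_intr_conf *ic = &conf->intr_conf;

	return ic->rxq != 0;
}
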
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 29de39910c6e..a3a8a1c82e3a 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1172,51 +1172,51 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
 	 * configuring a sub-device.
 	 */
 	infos->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->rx_queue_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	infos->flow_type_rss_offloads =
-		ETH_RSS_IP |
-		ETH_RSS_UDP |
-		ETH_RSS_TCP;
+		RTE_ETH_RSS_IP |
+		RTE_ETH_RSS_UDP |
+		RTE_ETH_RSS_TCP;
 	infos->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 17c73c4dc5ae..b7522a47a80b 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -177,7 +177,7 @@ struct fm10k_rx_queue {
 	uint8_t drop_en;
 	uint8_t rx_deferred_start; /* don't start this queue in dev start. */
 	uint16_t rx_ftag_en; /* indicates FTAG RX supported */
-	uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /*
@@ -209,7 +209,7 @@ struct fm10k_tx_queue {
 	uint16_t next_rs; /* Next pos to set RS flag */
 	uint16_t next_dd; /* Next pos to check DD flag */
 	volatile uint32_t *tail_ptr;
-	uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
 	uint16_t nb_desc;
 	uint16_t port_id;
 	uint8_t tx_deferred_start; /** don't start this queue in dev start. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 66f4a5c6df2c..d256334bfde9 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -413,12 +413,12 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
 
 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 
-	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		return 0;
 
 	if (hw->mac.type == fm10k_mac_vf) {
@@ -449,8 +449,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
@@ -510,7 +510,7 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};
 
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
 		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
@@ -547,15 +547,15 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 	 */
 	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	if (mrqc == 0) {
 		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
@@ -602,7 +602,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	if (hw->mac.type != fm10k_mac_pf)
 		return;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		nb_queue_pools = vmdq_conf->nb_queue_pools;
 
 	/* no pool number change, no need to update logic port and VLAN/MAC */
@@ -759,7 +759,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+			rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1145,7 +1145,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Update default vlan when not in VMDQ mode */
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
 	fm10k_link_update(dev, 0);
@@ -1222,11 +1222,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();
 
-	dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_speed  = RTE_ETH_SPEED_NUM_50G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	dev->data->dev_link.link_status =
-		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+		dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return 0;
 }
@@ -1378,7 +1378,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_vfs            = pdev->max_vfs;
 	dev_info->vmdq_pool_base     = 0;
 	dev_info->vmdq_queue_base    = 0;
-	dev_info->max_vmdq_pools     = ETH_32_POOLS;
+	dev_info->max_vmdq_pools     = RTE_ETH_32_POOLS;
 	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
 	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
@@ -1389,15 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
-	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-					ETH_RSS_IPV6 |
-					ETH_RSS_IPV6_EX |
-					ETH_RSS_NONFRAG_IPV4_TCP |
-					ETH_RSS_NONFRAG_IPV6_TCP |
-					ETH_RSS_IPV6_TCP_EX |
-					ETH_RSS_NONFRAG_IPV4_UDP |
-					ETH_RSS_NONFRAG_IPV6_UDP |
-					ETH_RSS_IPV6_UDP_EX;
+	dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+					RTE_ETH_RSS_IPV6 |
+					RTE_ETH_RSS_IPV6_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+					RTE_ETH_RSS_IPV6_TCP_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+					RTE_ETH_RSS_IPV6_UDP_EX;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1435,9 +1435,9 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+			RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1509,7 +1509,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		return -EINVAL;
 	}
 
-	if (vlan_id > ETH_VLAN_ID_MAX) {
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
 		return -EINVAL;
 	}
@@ -1767,20 +1767,20 @@ static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+	return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
 }
 
 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
-			   DEV_RX_OFFLOAD_VLAN_FILTER |
-			   DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			   DEV_RX_OFFLOAD_UDP_CKSUM   |
-			   DEV_RX_OFFLOAD_TCP_CKSUM   |
-			   DEV_RX_OFFLOAD_HEADER_SPLIT |
-			   DEV_RX_OFFLOAD_RSS_HASH);
+	return  (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH);
 }
 
 static int
@@ -1965,12 +1965,12 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_MULTI_SEGS  |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_TSO);
+	return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO);
 }
 
 static int
@@ -2111,8 +2111,8 @@ fm10k_reta_update(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2160,8 +2160,8 @@ fm10k_reta_query(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2198,15 +2198,15 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	/* If the mapping doesn't fit any supported, return */
 	if (mrqc == 0)
@@ -2243,15 +2243,15 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
 	hf = 0;
-	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV4)     ? RTE_ETH_RSS_IPV4              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6_EX           : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX       : 0;
 
 	rss_conf->rss_hf = hf;
 
@@ -2606,7 +2606,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 
 			/* first clear the internal SW recording structure */
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					false);
 
@@ -2622,7 +2622,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 					MAIN_VSI_POOL_NUMBER);
 
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					true);
 
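
The MRQC mapping in the fm10k hunks above has an application-side mirror: the same RTE_ETH_RSS_* bits select the hash inputs at configure time. A minimal sketch with the new names; app_enable_rss() is an illustrative helper, not code from this patch:

#include <rte_ethdev.h>

static int
app_enable_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	/* Request RSS distribution with the renamed mq_mode and
	 * hash-field macros; NULL rss_key keeps the driver default.
	 */
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IPV4 |
					  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
					  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
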
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 83af01dc2da6..50973a662c67 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -208,11 +208,11 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 	/* without rx ol_flags, no VP flag report */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 #endif
 
@@ -221,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
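
The condition check above is the usual vector-path idiom: opt out when a requested offload cannot be expressed in the simplified descriptor parsing. A sketch of the same test with the new macros; rx_vec_allowed() is a hypothetical name:

#include <stdbool.h>
#include <rte_ethdev.h>

static bool
rx_vec_allowed(const struct rte_eth_rxmode *rxmode)
{
	/* Offloads this hypothetical vector path cannot honor. */
	const uint64_t unsupported = RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
				     RTE_ETH_RX_OFFLOAD_HEADER_SPLIT;

	return (rxmode->offloads & unsupported) == 0;
}
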
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c
index cb9cf6efa287..80f9eb5c3031 100644
--- a/drivers/net/hinic/base/hinic_pmd_hwdev.c
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -1320,28 +1320,28 @@ hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
 static int hinic_link_event_process(struct hinic_hwdev *hwdev,
 				    struct rte_eth_dev *eth_dev, u8 status)
 {
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 	struct nic_port_info port_info;
 	struct rte_eth_link link;
 	int rc = HINIC_OK;
 
 	if (!status) {
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
 		memset(&port_info, 0, sizeof(port_info));
 		rc = hinic_get_port_info(hwdev, &port_info);
 		if (rc) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
-			link.link_autoneg = ETH_LINK_FIXED;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+			link.link_autoneg = RTE_ETH_LINK_FIXED;
 		} else {
 			link.link_speed = port_speed[port_info.speed %
 						LINK_SPEED_MAX];
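
The hinic link-event hunks fill struct rte_eth_link from hardware state. The same pattern with the renamed constants, sketched with a hard-coded 25G speed for illustration (fill_link() is not hinic code):

#include <stdbool.h>
#include <rte_ethdev.h>

static void
fill_link(struct rte_eth_link *link, bool up)
{
	if (!up) {
		/* Link down: no speed, half duplex, fixed. */
		link->link_status = RTE_ETH_LINK_DOWN;
		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
		link->link_autoneg = RTE_ETH_LINK_FIXED;
		return;
	}

	link->link_status = RTE_ETH_LINK_UP;
	link->link_speed = RTE_ETH_SPEED_NUM_25G; /* placeholder value */
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
}
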
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index c2374ebb6759..4cd5a85d5f8d 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -311,8 +311,8 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* mtu size is 256~9600 */
 	if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
@@ -338,7 +338,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 
 	/* init vlan offload */
 	err = hinic_vlan_offload_set(dev,
-				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+				RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
 		(void)hinic_config_mq_mode(dev, FALSE);
@@ -696,15 +696,15 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
 	} else {
 		*speed_capa = 0;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
-			*speed_capa |= ETH_LINK_SPEED_1G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_1G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
-			*speed_capa |= ETH_LINK_SPEED_10G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_10G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
-			*speed_capa |= ETH_LINK_SPEED_25G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_25G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
-			*speed_capa |= ETH_LINK_SPEED_40G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_40G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
-			*speed_capa |= ETH_LINK_SPEED_100G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	}
 }
 
@@ -732,24 +732,24 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 
 	hinic_get_speed_capa(dev, &info->speed_capa);
 	info->rx_queue_offload_capa = 0;
-	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_VLAN_FILTER |
-				DEV_RX_OFFLOAD_SCATTER |
-				DEV_RX_OFFLOAD_TCP_LRO |
-				DEV_RX_OFFLOAD_RSS_HASH;
+	info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				RTE_ETH_RX_OFFLOAD_SCATTER |
+				RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	info->tx_queue_offload_capa = 0;
-	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_UDP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_CKSUM |
-				DEV_TX_OFFLOAD_SCTP_CKSUM |
-				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_TCP_TSO |
-				DEV_TX_OFFLOAD_MULTI_SEGS;
+	info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
 	info->reta_size = HINIC_RSS_INDIR_SIZE;
@@ -846,20 +846,20 @@ static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
 	u8 port_link_status = 0;
 	struct nic_port_info port_link_info;
 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 
 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
 	if (rc)
 		return rc;
 
 	if (!port_link_status) {
-		link->link_status = ETH_LINK_DOWN;
+		link->link_status = RTE_ETH_LINK_DOWN;
 		link->link_speed = 0;
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
-		link->link_autoneg = ETH_LINK_FIXED;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_FIXED;
 		return HINIC_OK;
 	}
 
@@ -901,8 +901,8 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* Get link status information from hardware */
 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
 		if (rc != HINIC_OK) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Get link status failed");
 			goto out;
 		}
@@ -1650,8 +1650,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	int err;
 
 	/* Enable or disable VLAN filter */
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
 			TRUE : FALSE;
 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
@@ -1672,8 +1672,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Enable or disable VLAN stripping */
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
 			TRUE : FALSE;
 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
 		if (err) {
@@ -1859,13 +1859,13 @@ static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
 	fc_conf->autoneg = nic_pause.auto_neg;
 
 	if (nic_pause.tx_pause && nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (nic_pause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else if (nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1879,14 +1879,14 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	nic_pause.auto_neg = fc_conf->autoneg;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		nic_pause.tx_pause = true;
 	else
 		nic_pause.tx_pause = false;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		nic_pause.rx_pause = true;
 	else
 		nic_pause.rx_pause = false;
@@ -1930,7 +1930,7 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err = 0;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_OK;
 	}
@@ -1951,14 +1951,14 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 		}
 	}
 
-	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 
 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
 	if (err) {
@@ -1994,7 +1994,7 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_ERROR;
 	}
@@ -2015,15 +2015,15 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	rss_conf->rss_hf |=  rss_type.ipv4 ?
-		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
 	rss_conf->rss_hf |=  rss_type.ipv6 ?
-		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
-	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+	rss_conf->rss_hf |=  rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
 
 	return HINIC_OK;
 }
@@ -2053,7 +2053,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 	u16 i = 0;
 	u16 idx, shift;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
 		return HINIC_OK;
 
 	if (reta_size != NIC_RSS_INDIR_SIZE) {
@@ -2067,8 +2067,8 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 
 	/* update rss indir_tbl */
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
@@ -2133,8 +2133,8 @@ static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
 	}
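
The idx/shift arithmetic above is the standard way to walk an RSS redirection table in RTE_ETH_RETA_GROUP_SIZE (64-entry) chunks, and it appears unchanged on the application side. A sketch assuming reta_size of at most 512 and nb_queues > 0; app_reta_spread() is illustrative:

#include <string.h>
#include <rte_ethdev.h>

static int
app_reta_spread(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	/* One rte_eth_rss_reta_entry64 covers RTE_ETH_RETA_GROUP_SIZE
	 * table entries; 512/64 = 8 groups is the assumed upper bound.
	 */
	struct rte_eth_rss_reta_entry64 reta[512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta[idx].mask |= 1ULL << shift;
		reta[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
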
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 842399cc4cd8..d347afe9a6a9 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -504,14 +504,14 @@ static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
 {
 	u64 rss_hf = rss_conf->rss_hf;
 
-	rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 }
 
 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
@@ -588,8 +588,8 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 {
 	int err, i;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 		nic_dev->num_rss = 0;
 		if (nic_dev->num_rq > 1) {
 			/* get rss template id */
@@ -599,7 +599,7 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 				PMD_DRV_LOG(WARNING, "Alloc rss template failed");
 				return err;
 			}
-			nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+			nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
 			for (i = 0; i < nic_dev->num_rq; i++)
 				hinic_add_rq_to_rx_queue_list(nic_dev, i);
 		}
@@ -610,12 +610,12 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 
 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
 {
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (hinic_rss_template_free(nic_dev->hwdev,
 					    nic_dev->rss_tmpl_idx))
 			PMD_DRV_LOG(WARNING, "Free rss template failed");
 
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 	}
 }
 
@@ -641,7 +641,7 @@ int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
 	int ret = 0;
 
 	switch (dev_conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		ret = hinic_config_mq_rx_rss(nic_dev, on);
 		break;
 	default:
@@ -662,7 +662,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	int lro_wqe_num;
 	int buf_size;
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (rss_conf.rss_hf == 0) {
 			rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
 		} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
@@ -678,7 +678,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
 	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
@@ -687,7 +687,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 		goto rx_csum_ofl_err;
 
 	/* config lro */
-	lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+	lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
 			true : false;
 	max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
 	buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
@@ -726,7 +726,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		hinic_rss_deinit(nic_dev);
 		hinic_destroy_num_qps(nic_dev);
 	}
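
The checksum and LRO tests above consume offload bits the application requests at configure time. A sketch of the requesting side; capability validation against dev_info is elided:

#include <rte_ethdev.h>

static void
app_request_rx_offloads(struct rte_eth_conf *conf)
{
	/* RTE_ETH_RX_OFFLOAD_CHECKSUM groups the IPv4, UDP and TCP
	 * checksum bits; LRO is requested separately.
	 */
	conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM |
				 RTE_ETH_RX_OFFLOAD_TCP_LRO;
}
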
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 8a45f2d9fc50..5c303398b635 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -8,17 +8,17 @@
 #define HINIC_DEFAULT_RX_FREE_THRESH	32
 
 #define HINIC_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |\
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 enum rq_completion_fmt {
 	RQ_COMPLETE_SGE = 1
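
HINIC_RSS_OFFLOAD_ALL above plays the role of a supported-hash-fields mask. A generic application can derive the same mask from dev_info instead of hard-coding it; a sketch (app_clamp_rss_hf() is illustrative):

#include <rte_ethdev.h>

static uint64_t
app_clamp_rss_hf(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return 0;

	/* Keep only the hash fields the port actually supports. */
	return requested & info.flow_type_rss_offloads;
}
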
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 8753c340e790..3d0159d78778 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -1536,7 +1536,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 		if (dcb_rx_conf->nb_tcs == 0)
 			hw->dcb_info.pfc_en = 1; /* tc0 only */
@@ -1693,7 +1693,7 @@ hns3_update_queue_map_configure(struct hns3_adapter *hns)
 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		return 0;
 
 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
@@ -1713,22 +1713,22 @@ static void
 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
 {
 	switch (mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->requested_fc_mode = HNS3_FC_FULL;
 		break;
 	default:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
-			  "configured to RTE_FC_NONE", mode);
+			  "configured to RTE_ETH_FC_NONE", mode);
 		break;
 	}
 }
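
hns3_get_fc_mode() maps the generic rte_eth_fc_mode enum onto driver state; the application-side counterpart sets that enum through the flow-control API. A sketch using only calls from rte_ethdev.h:

#include <rte_ethdev.h>

static int
app_set_fc_full(uint16_t port_id)
{
	struct rte_eth_fc_conf fc;
	int ret;

	/* Read back current settings so only the mode changes. */
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
	if (ret != 0)
		return ret;

	fc.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}
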
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 693048f58704..8e0ccecb57a6 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -60,29 +60,29 @@ enum hns3_evt_cause {
 };
 
 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
-	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
 };
@@ -500,8 +500,8 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	struct hns3_cmd_desc desc;
 	int ret;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
 		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
 		return -EINVAL;
 	}
@@ -514,10 +514,10 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
 	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
@@ -725,11 +725,11 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rte_spinlock_lock(&hw->lock);
 	rxmode = &dev->data->dev_conf.rxmode;
 	tmp_mask = (unsigned int)mask;
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* ignore vlan filter configuration during promiscuous mode */
 		if (!dev->data->promiscuous) {
 			/* Enable or disable VLAN filter */
-			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
 				 true : false;
 
 			ret = hns3_enable_vlan_filter(hns, enable);
@@ -742,9 +742,9 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
 		    true : false;
 
 		ret = hns3_en_hw_strip_rxvtag(hns, enable);
@@ -1118,7 +1118,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
 				       RTE_ETHER_TYPE_VLAN);
 	if (ret) {
 		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
@@ -1161,7 +1161,7 @@ hns3_restore_vlan_conf(struct hns3_adapter *hns)
 	if (!hw->data->promiscuous) {
 		/* restore vlan filter states */
 		offloads = hw->data->dev_conf.rxmode.offloads;
-		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
 		ret = hns3_enable_vlan_filter(hns, enable);
 		if (ret) {
 			hns3_err(hw, "failed to restore vlan rx filter conf, "
@@ -1204,7 +1204,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
 			  txmode->hw_vlan_reject_untagged);
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	ret = hns3_vlan_offload_set(dev, mask);
 	if (ret) {
 		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
@@ -2213,9 +2213,9 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 	int max_tc = 0;
 	int i;
 
-	if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
-	    (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
-	     tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
 		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
 			 rx_mq_mode, tx_mq_mode);
 		return -EOPNOTSUPP;
@@ -2223,7 +2223,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
 			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
 				 dcb_rx_conf->nb_tcs, pf->tc_max);
@@ -2232,7 +2232,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
 		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
-			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
 				 "nb_tcs(%d) != %d or %d in rx direction.",
 				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
 			return -EINVAL;
@@ -2400,11 +2400,11 @@ hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
 	 * configure link_speeds (default 0), which means auto-negotiation.
 	 * In this case, it should return success.
 	 */
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
 	    hw->mac.support_autoneg == 0)
 		return 0;
 
-	if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
 		ret = hns3_check_port_speed(hw, link_speeds);
 		if (ret)
 			return ret;
@@ -2464,15 +2464,15 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		goto cfg_err;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_setup_dcb(dev);
 		if (ret)
 			goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		hw->rss_dis_flag = false;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -2493,7 +2493,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -2600,15 +2600,15 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 
 	return speed_capa;
 }
@@ -2619,19 +2619,19 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	return speed_capa;
 }
@@ -2650,7 +2650,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)
 			hns3_get_firber_port_speed_capa(mac->supported_speed);
 
 	if (mac->support_autoneg == 0)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -2676,40 +2676,40 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_KEEP_CRC |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
 	if (hns3_dev_get_support(hw, PTP))
-		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
@@ -2793,7 +2793,7 @@ hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
 
 	ret = hns3_update_link_info(eth_dev);
 	if (ret)
-		hw->mac.link_status = ETH_LINK_DOWN;
+		hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	return ret;
 }
@@ -2806,29 +2806,29 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link->link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link->link_speed = ETH_SPEED_NUM_NONE;
+		new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link->link_duplex = mac->link_duplex;
-	new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link->link_autoneg = mac->link_autoneg;
 }
 
@@ -2848,8 +2848,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 	if (eth_dev->data->dev_started == 0) {
 		new_link.link_autoneg = mac->link_autoneg;
 		new_link.link_duplex = mac->link_duplex;
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
-		new_link.link_status = ETH_LINK_DOWN;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		new_link.link_status = RTE_ETH_LINK_DOWN;
 		goto out;
 	}
 
@@ -2861,7 +2861,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 			break;
 		}
 
-		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+		if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
 			break;
 
 		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
@@ -3207,31 +3207,31 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
 {
 	switch (speed_cmd) {
 	case HNS3_CFG_SPEED_10M:
-		*speed = ETH_SPEED_NUM_10M;
+		*speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case HNS3_CFG_SPEED_100M:
-		*speed = ETH_SPEED_NUM_100M;
+		*speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HNS3_CFG_SPEED_1G:
-		*speed = ETH_SPEED_NUM_1G;
+		*speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HNS3_CFG_SPEED_10G:
-		*speed = ETH_SPEED_NUM_10G;
+		*speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HNS3_CFG_SPEED_25G:
-		*speed = ETH_SPEED_NUM_25G;
+		*speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HNS3_CFG_SPEED_40G:
-		*speed = ETH_SPEED_NUM_40G;
+		*speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HNS3_CFG_SPEED_50G:
-		*speed = ETH_SPEED_NUM_50G;
+		*speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HNS3_CFG_SPEED_100G:
-		*speed = ETH_SPEED_NUM_100G;
+		*speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HNS3_CFG_SPEED_200G:
-		*speed = ETH_SPEED_NUM_200G;
+		*speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	default:
 		return -EINVAL;
@@ -3559,39 +3559,39 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
 	hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
 
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
 		break;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
 		break;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
 		break;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
 		break;
@@ -4254,14 +4254,14 @@ hns3_mac_init(struct hns3_hw *hw)
 	int ret;
 
 	pf->support_sfp_query = true;
-	mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+	mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
 		return ret;
 	}
 
-	mac->link_status = ETH_LINK_DOWN;
+	mac->link_status = RTE_ETH_LINK_DOWN;
 
 	return hns3_config_mtu(hw, pf->mps);
 }
@@ -4511,7 +4511,7 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	 * all packets coming in the receiving direction.
 	 */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, false);
 		if (ret) {
 			hns3_err(hw, "failed to enable promiscuous mode due to "
@@ -4552,7 +4552,7 @@ hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	}
 	/* when promiscuous mode was disabled, restore the vlan filter status */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, true);
 		if (ret) {
 			hns3_err(hw, "failed to disable promiscuous mode due to"
@@ -4672,8 +4672,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 		mac_info->supported_speed =
 					rte_le_to_cpu_32(resp->supported_speed);
 		mac_info->support_autoneg = resp->autoneg_ability;
-		mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
-					: ETH_LINK_AUTONEG;
+		mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+					: RTE_ETH_LINK_AUTONEG;
 	} else {
 		mac_info->query_type = HNS3_DEFAULT_QUERY;
 	}
@@ -4684,8 +4684,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 static uint8_t
 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
 {
-	if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
-		duplex = ETH_LINK_FULL_DUPLEX;
+	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return duplex;
 }
@@ -4735,7 +4735,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 		return ret;
 
 	/* Do nothing if no SFP */
-	if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
 		return 0;
 
 	/*
@@ -4762,7 +4762,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 
 	/* Config full duplex for SFP */
 	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
-				      ETH_LINK_FULL_DUPLEX);
+				      RTE_ETH_LINK_FULL_DUPLEX);
 }
 
 static void
@@ -4881,10 +4881,10 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
 	hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
 
 	/*
-	 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+	 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
 	 * when receiving frames. Otherwise, CRC will be stripped.
 	 */
-	if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
 	else
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
@@ -4912,7 +4912,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret) {
 		hns3_err(hw, "get link status cmd failed %d", ret);
-		return ETH_LINK_DOWN;
+		return RTE_ETH_LINK_DOWN;
 	}
 
 	req = (struct hns3_link_status_cmd *)desc.data;
@@ -5094,19 +5094,19 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		return HNS3_FIBER_LINK_SPEED_1G_BIT;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		return HNS3_FIBER_LINK_SPEED_10G_BIT;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		return HNS3_FIBER_LINK_SPEED_25G_BIT;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		return HNS3_FIBER_LINK_SPEED_40G_BIT;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		return HNS3_FIBER_LINK_SPEED_50G_BIT;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		return HNS3_FIBER_LINK_SPEED_100G_BIT;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		return HNS3_FIBER_LINK_SPEED_200G_BIT;
 	default:
 		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
@@ -5344,20 +5344,20 @@ hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_10M:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_10M:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
 		break;
-	case ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
 		break;
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
 		break;
 	default:
@@ -5373,26 +5373,26 @@ hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_1G:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
 		break;
 	default:
@@ -5427,28 +5427,28 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
 static inline uint32_t
 hns3_get_link_speed(uint32_t link_speeds)
 {
-	uint32_t speed = ETH_SPEED_NUM_NONE;
-
-	if (link_speeds & ETH_LINK_SPEED_10M ||
-	    link_speeds & ETH_LINK_SPEED_10M_HD)
-		speed = ETH_SPEED_NUM_10M;
-	if (link_speeds & ETH_LINK_SPEED_100M ||
-	    link_speeds & ETH_LINK_SPEED_100M_HD)
-		speed = ETH_SPEED_NUM_100M;
-	if (link_speeds & ETH_LINK_SPEED_1G)
-		speed = ETH_SPEED_NUM_1G;
-	if (link_speeds & ETH_LINK_SPEED_10G)
-		speed = ETH_SPEED_NUM_10G;
-	if (link_speeds & ETH_LINK_SPEED_25G)
-		speed = ETH_SPEED_NUM_25G;
-	if (link_speeds & ETH_LINK_SPEED_40G)
-		speed = ETH_SPEED_NUM_40G;
-	if (link_speeds & ETH_LINK_SPEED_50G)
-		speed = ETH_SPEED_NUM_50G;
-	if (link_speeds & ETH_LINK_SPEED_100G)
-		speed = ETH_SPEED_NUM_100G;
-	if (link_speeds & ETH_LINK_SPEED_200G)
-		speed = ETH_SPEED_NUM_200G;
+	uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+	if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+		speed = RTE_ETH_SPEED_NUM_10M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+		speed = RTE_ETH_SPEED_NUM_100M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+		speed = RTE_ETH_SPEED_NUM_1G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+		speed = RTE_ETH_SPEED_NUM_10G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+		speed = RTE_ETH_SPEED_NUM_25G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+		speed = RTE_ETH_SPEED_NUM_40G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+		speed = RTE_ETH_SPEED_NUM_50G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+		speed = RTE_ETH_SPEED_NUM_100G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+		speed = RTE_ETH_SPEED_NUM_200G;
 
 	return speed;
 }
@@ -5456,11 +5456,11 @@ hns3_get_link_speed(uint32_t link_speeds)
 static uint8_t
 hns3_get_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-	    (link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+	    (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 static int
@@ -5594,9 +5594,9 @@ hns3_apply_link_speed(struct hns3_hw *hw)
 	struct hns3_set_link_speed_cfg cfg;
 
 	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
-	cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
-			ETH_LINK_AUTONEG : ETH_LINK_FIXED;
-	if (cfg.autoneg != ETH_LINK_AUTONEG) {
+	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+			RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
 		cfg.speed = hns3_get_link_speed(conf->link_speeds);
 		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
 	}
@@ -5869,7 +5869,7 @@ hns3_do_stop(struct hns3_adapter *hns)
 	ret = hns3_cfg_mac_mode(hw, false);
 	if (ret)
 		return ret;
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
@@ -6080,17 +6080,17 @@ hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	current_mode = hns3_get_current_fc_mode(dev);
 	switch (current_mode) {
 	case HNS3_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case HNS3_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HNS3_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case HNS3_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	}
 
@@ -6236,7 +6236,7 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	int i;
 
 	rte_spinlock_lock(&hw->lock);
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = pf->local_max_tc;
 	else
 		dcb_info->nb_tcs = 1;
@@ -6536,7 +6536,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 		hns3_update_linkstatus_and_event(hw, false);
@@ -6826,7 +6826,7 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
 	 * in device of link speed
 	 * below 10 Gbps.
 	 */
-	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
 		*state = 0;
 		return 0;
 	}
@@ -6858,7 +6858,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
 	 * configured FEC mode is returned.
 	 * If link is up, current FEC mode is returned.
 	 */
-	if (hw->mac.link_status == ETH_LINK_DOWN) {
+	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
 		ret = get_current_fec_auto_state(hw, &auto_state);
 		if (ret)
 			return ret;
@@ -6957,12 +6957,12 @@ get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
 	uint32_t cur_capa;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		cur_capa = fec_capa[1].capa;
 		break;
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		cur_capa = fec_capa[0].capa;
 		break;
 	default:
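
The link_speeds handling above distinguishes auto-negotiation from a forced speed via RTE_ETH_LINK_SPEED_FIXED. A sketch of how an application requests a fixed 10G link under the new names:

#include <rte_ethdev.h>

static void
app_force_10g(struct rte_eth_conf *conf)
{
	/* Without RTE_ETH_LINK_SPEED_FIXED the bitmap is an
	 * auto-negotiation advertisement, not a forced speed.
	 */
	conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
			    RTE_ETH_LINK_SPEED_10G;
}
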
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e28056b1bd60..0f55fd4c83ad 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -190,10 +190,10 @@ struct hns3_mac {
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 	uint8_t media_type;
 	uint8_t phy_addr;
-	uint8_t link_duplex  : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
-	uint8_t link_status  : 1; /* ETH_LINK_[DOWN/UP] */
-	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
+	uint8_t link_duplex  : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint8_t link_status  : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;      /* RTE_ETH_SPEED_NUM_ */
 	/*
 	 * Some firmware versions support only the SFP speed query. In addition
 	 * to the SFP speed query, some firmware supports the query of the speed
@@ -1076,9 +1076,9 @@ static inline uint64_t
 hns3_txvlan_cap_get(struct hns3_hw *hw)
 {
 	if (hw->port_base_vlan_cfg.state)
-		return DEV_TX_OFFLOAD_VLAN_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	else
-		return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 }
 
 #endif /* _HNS3_ETHDEV_H_ */
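
hns3_txvlan_cap_get() above feeds tx_offload_capa, which applications probe before enabling an offload. A sketch of that probe; app_qinq_insert_supported() is an illustrative name:

#include <stdbool.h>
#include <rte_ethdev.h>

static bool
app_qinq_insert_supported(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return false;

	/* Test the renamed TX offload capability bit. */
	return (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) != 0;
}
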
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 54dbd4b798f2..7b784048b518 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -807,15 +807,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	hw->adapter_state = HNS3_NIC_CONFIGURING;
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
 		ret = -EINVAL;
 		goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -832,7 +832,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -935,32 +935,32 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1640,10 +1640,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	tmp_mask = (unsigned int)mask;
 
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN filter */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = hns3vf_en_vlan_filter(hw, true);
 		else
 			ret = hns3vf_en_vlan_filter(hw, false);
@@ -1653,10 +1653,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Vlan stripping setting */
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
 		else
 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1724,7 +1724,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
 	int ret;
 
 	dev_conf = &hw->data->dev_conf;
-	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
 								   : false;
 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
 	if (ret)
@@ -1749,8 +1749,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
 	}
 
 	/* Apply vlan offload setting */
-	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK);
+	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
 
@@ -2059,7 +2059,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	/*
 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2218,31 +2218,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link.link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link.link_duplex = mac->link_duplex;
-	new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -2570,11 +2570,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 		 * Make sure call update link status before hns3vf_stop_poll_job
 		 * because update link status depend on polling job exist.
 		 */
-		hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
 					  hw->mac.link_duplex);
 		hns3vf_stop_poll_job(eth_dev);
 	}
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
 	rte_wmb();
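
The rte_eth_link fields populated in hns3vf_dev_link_update() are consumed with the same renamed macros; a minimal reader sketch (hypothetical 'port_id'):

	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: %u Mbps, %s duplex, autoneg %s\n",
		       port_id, link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half",
		       link.link_autoneg == RTE_ETH_LINK_AUTONEG ?
		       "on" : "off");
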
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 38a2ee58a651..da6918fddda3 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1298,10 +1298,10 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
 	 * Kunpeng930 and future kunpeng series support to use src/dst port
 	 * fields to RSS hash for IPv6 SCTP packet type.
 	 */
-	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	    (rss->types & ETH_RSS_IP ||
+	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+	    (rss->types & RTE_ETH_RSS_IP ||
 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
-	    rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 		return false;
 
 	return true;
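
The types checked here arrive from an rte_flow RSS action; a sketch of one exercising the L4 src/dst modifiers validated above (attr/pattern setup omitted, names hypothetical):

	uint16_t queues[2] = { 0, 1 };
	struct rte_flow_action_rss rss = {
		.types = RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
			 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,
		.queue_num = 2,
		.queue = queues,
	};

Whether the device accepts such a combination is decided by exactly the capability test in hns3_rss_input_tuple_supported().
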
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
index 5dfe68cc4dbd..9a829d7011ad 100644
--- a/drivers/net/hns3/hns3_ptp.c
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -21,7 +21,7 @@ hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		return 0;
 
 	ret = rte_mbuf_dyn_rx_timestamp_register
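
The offload gate above means Rx timestamping must be requested at configure time; a sketch (hypothetical 'port_id', one queue each way):

	struct rte_eth_conf port_conf = {0};
	int ret;

	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);

Only then does the PMD register the dynamic mbuf timestamp field.
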
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index 3a81e90e0911..85495bbe89d9 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -76,69 +76,69 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_tuple_table[] = {
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
 };
 
@@ -146,44 +146,44 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_rss_types[] = {
-	{ ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+	{ RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+	{ RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
 };
@@ -365,10 +365,10 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
 	 * When user does not specify the following types or a combination of
 	 * the following types, it enables all fields for the supported RSS
 	 * types. the following types as:
-	 * - ETH_RSS_L3_SRC_ONLY
-	 * - ETH_RSS_L3_DST_ONLY
-	 * - ETH_RSS_L4_SRC_ONLY
-	 * - ETH_RSS_L4_DST_ONLY
+	 * - RTE_ETH_RSS_L3_SRC_ONLY
+	 * - RTE_ETH_RSS_L3_DST_ONLY
+	 * - RTE_ETH_RSS_L4_SRC_ONLY
+	 * - RTE_ETH_RSS_L4_DST_ONLY
 	 */
 	if (fields_count == 0) {
 		for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
@@ -520,8 +520,8 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
 	memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl,
 	       sizeof(rss_cfg->rss_indirection_tbl));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
 			rte_spinlock_unlock(&hw->lock);
 			hns3_err(hw, "queue id(%u) set to redirection table "
@@ -572,8 +572,8 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	rte_spinlock_lock(&hw->lock);
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] =
 						rss_cfg->rss_indirection_tbl[i];
@@ -692,7 +692,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	}
 
 	/* When RSS is off, redirect the packet queue 0 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
 		hns3_rss_uninit(hns);
 
 	/* Configure RSS hash algorithm and hash key offset */
@@ -709,7 +709,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	 * When RSS is off, it doesn't need to configure rss redirection table
 	 * to hardware.
 	 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
 					       hw->rss_ind_tbl_size);
 		if (ret)
@@ -723,7 +723,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	return ret;
 
 rss_indir_table_uninit:
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret1 = hns3_rss_reset_indir_table(hw);
 		if (ret1 != 0)
 			return ret;
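
The idx/shift arithmetic above mirrors what applications do when filling the entry64 groups; a sketch spreading a 512-entry table over two queues (hypothetical 'port_id'):

	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;
	int ret;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_512; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % 2;
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
					  RTE_ETH_RSS_RETA_SIZE_512);

The size passed should come from dev_info.reta_size in portable code; 512 is used here only because it matches hns3's HNS3_RSS_IND_TBL_SIZE, and both hns3 and i40e reject mismatched sizes as the hunks show.
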
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 996083b88b25..6f153a1b7bfb 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -8,20 +8,20 @@
 #include <rte_flow.h>
 
 #define HNS3_ETH_RSS_SUPPORT ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L3_SRC_ONLY | \
-	ETH_RSS_L3_DST_ONLY | \
-	ETH_RSS_L4_SRC_ONLY | \
-	ETH_RSS_L4_DST_ONLY)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L3_SRC_ONLY | \
+	RTE_ETH_RSS_L3_DST_ONLY | \
+	RTE_ETH_RSS_L4_SRC_ONLY | \
+	RTE_ETH_RSS_L4_DST_ONLY)
 
 #define HNS3_RSS_IND_TBL_SIZE	512 /* The size of hash lookup table */
 #define HNS3_RSS_IND_TBL_SIZE_MAX 2048
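
HNS3_ETH_RSS_SUPPORT is what the driver reports back through dev_info.flow_type_rss_offloads, so a portable application trims its request against it; sketch (hypothetical 'port_id'):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_conf rss_conf = {
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
	};

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Keep only the hash types this port can actually compute. */
	rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
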
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 602548a4f25b..920ee8ceeab9 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1924,7 +1924,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* CRC len set here is used for amending packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1969,7 +1969,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
 						 rxq->rx_buf_len);
 	}
 
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 	    dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
 		dev->data->scattered_rx = true;
 }
@@ -2845,7 +2845,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = !dev->data->scattered_rx &&
-			 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+			 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
 
 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_recv_pkts_vec;
@@ -3139,7 +3139,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
 	int ret;
 
 	offloads = hw->data->dev_conf.rxmode.offloads;
-	gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
@@ -4291,7 +4291,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return false;
 
-	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
 }
 
 static bool
@@ -4303,16 +4303,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 	return true;
 #else
 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
-		DEV_TX_OFFLOAD_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_CKSUM | \
-		DEV_TX_OFFLOAD_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_SCTP_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index c8229e9076b5..dfea5d5b4c2f 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -307,7 +307,7 @@ struct hns3_rx_queue {
 	uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
 	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */
 
-	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+	/* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
 	/*
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index ff434d2d33ed..455110361aac 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -22,8 +22,8 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
-	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
-	if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	/* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		return -ENOTSUP;
 
 	return 0;
@@ -228,10 +228,10 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
 int
 hns3_rx_check_vec_support(struct rte_eth_dev *dev)
 {
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
-				 DEV_RX_OFFLOAD_VLAN;
+	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				 RTE_ETH_RX_OFFLOAD_VLAN;
 
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	if (hns3_dev_get_support(hw, PTP))
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0a4db0891d4a..293df887bf7c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1629,7 +1629,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 
 	/* Set the global registers with default ether type value */
 	if (!pf->support_multi_driver) {
-		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					 RTE_ETHER_TYPE_VLAN);
 		if (ret != I40E_SUCCESS) {
 			PMD_INIT_LOG(ERR,
@@ -1896,8 +1896,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Only legacy filter API needs the following fdir config. So when the
 	 * legacy filter API is deprecated, the following codes should also be
@@ -1931,13 +1931,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	 *  number, which will be available after rx_queue_setup(). dev_start()
 	 *  function is good to place RSS setup.
 	 */
-	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
 		ret = i40e_vmdq_setup(dev);
 		if (ret)
 			goto err;
 	}
 
-	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = i40e_dcb_setup(dev);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
@@ -2214,17 +2214,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 {
 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
 
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed |= I40E_LINK_SPEED_40GB;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed |= I40E_LINK_SPEED_25GB;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed |= I40E_LINK_SPEED_20GB;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed |= I40E_LINK_SPEED_10GB;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed |= I40E_LINK_SPEED_1GB;
-	if (link_speeds & ETH_LINK_SPEED_100M)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 		link_speed |= I40E_LINK_SPEED_100MB;
 
 	return link_speed;
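
i40e_parse_link_speeds() consumes the application-side link_speeds field; a sketch pinning the port to 10G instead of autonegotiation (RTE_ETH_LINK_SPEED_AUTONEG is simply 0):

	struct rte_eth_conf port_conf = {0};

	/* Fixed 10G; drop RTE_ETH_LINK_SPEED_FIXED to keep autoneg. */
	port_conf.link_speeds = RTE_ETH_LINK_SPEED_10G |
				RTE_ETH_LINK_SPEED_FIXED;
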
@@ -2332,13 +2332,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
 		     I40E_AQ_PHY_LINK_ENABLED;
 
-	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
-		conf->link_speeds = ETH_LINK_SPEED_40G |
-				    ETH_LINK_SPEED_25G |
-				    ETH_LINK_SPEED_20G |
-				    ETH_LINK_SPEED_10G |
-				    ETH_LINK_SPEED_1G |
-				    ETH_LINK_SPEED_100M;
+	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+				    RTE_ETH_LINK_SPEED_25G |
+				    RTE_ETH_LINK_SPEED_20G |
+				    RTE_ETH_LINK_SPEED_10G |
+				    RTE_ETH_LINK_SPEED_1G |
+				    RTE_ETH_LINK_SPEED_100M;
 
 		abilities |= I40E_AQ_PHY_AN_ENABLED;
 	} else {
@@ -2876,34 +2876,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	/* Parse the link status */
 	switch (link_speed) {
 	case I40E_REG_SPEED_0:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_REG_SPEED_1:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_REG_SPEED_2:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_REG_SPEED_3:
 		if (hw->mac.type == I40E_MAC_X722) {
-			link->link_speed = ETH_SPEED_NUM_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		} else {
 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
 
 			if (reg_val & I40E_REG_MACC_25GB)
-				link->link_speed = ETH_SPEED_NUM_25G;
+				link->link_speed = RTE_ETH_SPEED_NUM_25G;
 			else
-				link->link_speed = ETH_SPEED_NUM_40G;
+				link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		}
 		break;
 	case I40E_REG_SPEED_4:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
-			link->link_speed = ETH_SPEED_NUM_20G;
+			link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2930,8 +2930,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 		status = i40e_aq_get_link_info(hw, enable_lse,
 						&link_status, NULL);
 		if (unlikely(status != I40E_SUCCESS)) {
-			link->link_speed = ETH_SPEED_NUM_NONE;
-			link->link_duplex = ETH_LINK_FULL_DUPLEX;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			return;
 		}
@@ -2946,28 +2946,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		link->link_speed = ETH_SPEED_NUM_20G;
+		link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (link->link_status)
-			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			link->link_speed = ETH_SPEED_NUM_NONE;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 }
@@ -2984,9 +2984,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	memset(&link, 0, sizeof(link));
 
 	/* i40e uses full duplex only */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	if (!wait_to_complete && !enable_lse)
 		update_link_reg(hw, &link);
@@ -3720,33 +3720,33 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -3805,7 +3805,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
 		/* For XL710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_40G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
 		dev_info->default_rxportconf.nb_queues = 2;
 		dev_info->default_txportconf.nb_queues = 2;
 		if (dev->data->nb_rx_queues == 1)
@@ -3819,17 +3819,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
 		/* For XXV710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_25G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
 		dev_info->default_rxportconf.ring_size = 256;
 		dev_info->default_txportconf.ring_size = 256;
 	} else {
 		/* For X710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
-		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
 			dev_info->default_rxportconf.ring_size = 512;
 			dev_info->default_txportconf.ring_size = 256;
 		} else {
@@ -3868,7 +3868,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
 	int ret;
 
 	if (qinq) {
-		if (vlan_type == ETH_VLAN_TYPE_OUTER)
+		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 			reg_id = 2;
 	}
 
@@ -3915,12 +3915,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
-	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -3934,12 +3934,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	/* 802.1ad frames ability is added in NVM API 1.7*/
 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
 		if (qinq) {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->first_tag = rte_cpu_to_le_16(tpid);
-			else if (vlan_type == ETH_VLAN_TYPE_INNER)
+			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		} else {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		}
 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
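
The same entry point is reachable from applications via the ethdev API; a sketch setting the 802.1ad outer TPID (hypothetical 'port_id'; on i40e the VLAN extend offload is assumed enabled so the outer type is selectable):

	int ret;

	ret = rte_eth_dev_set_vlan_ether_type(port_id,
					      RTE_ETH_VLAN_TYPE_OUTER,
					      RTE_ETHER_TYPE_QINQ);
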
@@ -3998,37 +3998,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					   RTE_ETHER_TYPE_VLAN);
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					   RTE_ETHER_TYPE_VLAN);
 		}
 		else
 			i40e_vsi_config_double_vlan(vsi, FALSE);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
 		/* Enable or disable outer VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
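
The *_MASK bits above are what the driver callback receives; applications drive it through rte_eth_dev_set_vlan_offload() with the matching *_OFFLOAD bits, which share the same values. Sketch (hypothetical 'port_id'):

	int ret;

	/* Enable stripping and filtering, leave extend/QinQ strip off. */
	ret = rte_eth_dev_set_vlan_offload(port_id,
					   RTE_ETH_VLAN_STRIP_OFFLOAD |
					   RTE_ETH_VLAN_FILTER_OFFLOAD);
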
@@ -4111,17 +4111,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	 /* Return current mode according to actual setting*/
 	switch (hw->fc.current_mode) {
 	case I40E_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case I40E_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case I40E_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case I40E_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	};
 
 	return 0;
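
A read-modify-write against the renamed RTE_ETH_FC_* modes, matching the mapping above; sketch (hypothetical 'port_id'):

	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret == 0 && fc_conf.mode != RTE_ETH_FC_FULL) {
		fc_conf.mode = RTE_ETH_FC_FULL;
		ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}
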
@@ -4137,10 +4137,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	struct i40e_hw *hw;
 	struct i40e_pf *pf;
 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
-		[RTE_FC_NONE] = I40E_FC_NONE,
-		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
-		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
-		[RTE_FC_FULL] = I40E_FC_FULL
+		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
+		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+		[RTE_ETH_FC_FULL] = I40E_FC_FULL
 	};
 
 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
@@ -4287,7 +4287,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4440,7 +4440,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4456,8 +4456,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4483,7 +4483,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4500,8 +4500,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -4818,7 +4818,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
 				hw->func_caps.num_vsis - vsi_count);
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-				ETH_64_POOLS);
+				RTE_ETH_64_POOLS);
 			if (pf->max_nb_vmdq_vsi) {
 				pf->flags |= I40E_FLAG_VMDQ;
 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -6104,10 +6104,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	int mask = 0;
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK |
-	       ETH_QINQ_STRIP_MASK |
-	       ETH_VLAN_FILTER_MASK |
-	       ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+	       RTE_ETH_QINQ_STRIP_MASK |
+	       RTE_ETH_VLAN_FILTER_MASK |
+	       RTE_ETH_VLAN_EXTEND_MASK;
 	ret = i40e_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6236,9 +6236,9 @@ i40e_pf_setup(struct i40e_pf *pf)
 
 	/* Configure filter control */
 	memset(&settings, 0, sizeof(settings));
-	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
-	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 	else {
 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -7098,7 +7098,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
 {
 	uint32_t vid_idx, vid_bit;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return 0;
 
 	vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7133,7 +7133,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
 	int ret;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return;
 
 	i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -7727,25 +7727,25 @@ static int
 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
 {
 	switch (filter_type) {
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_OIP:
+	case RTE_ETH_TUNNEL_FILTER_OIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
 		break;
-	case ETH_TUNNEL_FILTER_IIP:
+	case RTE_ETH_TUNNEL_FILTER_IIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 		break;
 	default:
@@ -8711,16 +8711,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8746,12 +8746,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
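
From the application, the renamed tunnel types reach this switch through the UDP tunnel API; a sketch registering the IANA VXLAN port (hypothetical 'port_id'):

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);

As the hunk shows, i40e still rejects GENEVE and TEREDO here.
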
@@ -8843,7 +8843,7 @@ int
 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->adapter->hw;
-	uint8_t lut[ETH_RSS_RETA_SIZE_512];
+	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 	uint32_t i;
 	int num;
 
@@ -8851,7 +8851,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 	 * configured. It's necessary to calculate the actual PF
 	 * queues that are configured.
 	 */
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		num = i40e_pf_calc_configured_queues_num(pf);
 	else
 		num = pf->dev_data->nb_rx_queues;
@@ -8930,7 +8930,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
-	    !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return 0;
 
 	hw = I40E_PF_TO_HW(pf);
@@ -10267,16 +10267,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_25G:
 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
 		break;
@@ -10504,7 +10504,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
 	else
 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
 
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_cfg->pfc.willing = 0;
 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 		dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11012,7 +11012,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint16_t bsf, tc_mapping;
 	int i, j = 0;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
 	else
 		dcb_info->nb_tcs = 1;
@@ -11060,7 +11060,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
 		}
 		j++;
-	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
 	return 0;
 }
 
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1d57b9617e66..d8042abbd9be 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -147,17 +147,17 @@ enum i40e_flxpld_layer_idx {
 		       I40E_FLAG_RSS_AQ_CAPABLE)
 
 #define I40E_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /* All bits of RSS hash enable for X722*/
 #define I40E_RSS_HENA_ALL_X722 ( \
@@ -1063,7 +1063,7 @@ struct i40e_rte_flow_rss_conf {
 	uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
 		     I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		    sizeof(uint32_t)];		/**< Hash key. */
-	uint16_t queue[ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
+	uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
 
 	bool symmetric_enable;		/**< true, if enable symmetric */
 	uint64_t config_pctypes;	/**< All PCTYPES with the flow  */
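
I40E_RSS_OFFLOAD_ALL groups the per-type bits much like the application-facing convenience macros; a sketch requesting comparable coverage at runtime (hypothetical 'port_id'; a NULL key leaves the current hash key unchanged):

	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
			  RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
			  RTE_ETH_RSS_L2_PAYLOAD,
	};
	int ret;

	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
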
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e41a84f1d737..9acaa1875105 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2015,7 +2015,7 @@ i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_VLAN_EXTEND;
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
@@ -3601,13 +3601,13 @@ i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
 }
 
 static uint16_t i40e_supported_tunnel_filter_types[] = {
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
-	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IMAC,
-	ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
+	RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC,
 };
 
 static int
@@ -3697,12 +3697,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 			break;
@@ -3724,7 +3724,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -3798,7 +3798,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					   vxlan_spec->vni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			vxlan_flag = 1;
@@ -3927,12 +3927,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 
@@ -3955,7 +3955,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -4050,7 +4050,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					   nvgre_spec->tni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			nvgre_flag = 1;
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
index 5da3d187076e..8962e9d97aa7 100644
--- a/drivers/net/i40e/i40e_hash.c
+++ b/drivers/net/i40e/i40e_hash.c
@@ -105,47 +105,47 @@ struct i40e_hash_map_rss_inset {
 
 const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
 	/* IPv4 */
-	{ ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
-	{ ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* IPv6 */
-	{ ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
-	{ ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* Port */
-	{ ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+	{ RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
 	/* Ether */
-	{ ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
-	{ ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+	{ RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+	{ RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
 
 	/* VLAN */
-	{ ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
-	{ ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+	{ RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+	{ RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
 };
 
 #define I40E_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
@@ -208,30 +208,30 @@ struct i40e_hash_match_pattern {
 #define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
 	pattern, rss_mask, true, cus_pctype }
 
-#define I40E_HASH_L2_RSS_MASK		(ETH_RSS_VLAN | ETH_RSS_ETH | \
-					ETH_RSS_L2_SRC_ONLY | \
-					ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK		(RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+					RTE_ETH_RSS_L2_SRC_ONLY | \
+					RTE_ETH_RSS_L2_DST_ONLY)
 
 #define I40E_HASH_L23_RSS_MASK		(I40E_HASH_L2_RSS_MASK | \
-					ETH_RSS_L3_SRC_ONLY | \
-					ETH_RSS_L3_DST_ONLY)
+					RTE_ETH_RSS_L3_SRC_ONLY | \
+					RTE_ETH_RSS_L3_DST_ONLY)
 
-#define I40E_HASH_IPV4_L23_RSS_MASK	(ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK	(ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK	(RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK	(RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
 
 #define I40E_HASH_L234_RSS_MASK		(I40E_HASH_L23_RSS_MASK | \
-					ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
-					ETH_RSS_L4_DST_ONLY)
+					RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+					RTE_ETH_RSS_L4_DST_ONLY)
 
-#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
 
-#define I40E_HASH_L4_TYPES		(ETH_RSS_NONFRAG_IPV4_TCP | \
-					ETH_RSS_NONFRAG_IPV4_UDP | \
-					ETH_RSS_NONFRAG_IPV4_SCTP | \
-					ETH_RSS_NONFRAG_IPV6_TCP | \
-					ETH_RSS_NONFRAG_IPV6_UDP | \
-					ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES		(RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* Current supported patterns and RSS types.
  * All items that have the same pattern types are together.
@@ -239,72 +239,72 @@ struct i40e_hash_match_pattern {
 static const struct i40e_hash_match_pattern match_patterns[] = {
 	/* Ether */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
-			      ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+			      RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
 			      I40E_FILTER_PCTYPE_L2_PAYLOAD),
 
 	/* IPv4 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV4),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_NONFRAG_IPV4_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
 			      I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
-			      ETH_RSS_NONFRAG_IPV4_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
-			      ETH_RSS_NONFRAG_IPV4_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_UDP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
-			      ETH_RSS_NONFRAG_IPV4_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
 
 	/* IPv6 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_NONFRAG_IPV6_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
 			      I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_FRAG,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
-			      ETH_RSS_NONFRAG_IPV6_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_TCP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
-			      ETH_RSS_NONFRAG_IPV6_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_UDP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
-			      ETH_RSS_NONFRAG_IPV6_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
 
 	/* ESP */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
 
 	/* GTPC */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
@@ -319,27 +319,27 @@ static const struct i40e_hash_match_pattern match_patterns[] = {
 				  I40E_HASH_IPV4_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
 				  I40E_HASH_IPV6_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 
 	/* L2TPV3 */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
 
 	/* AH */
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV4),
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV6),
 };
 
@@ -575,29 +575,29 @@ i40e_hash_get_inset(uint64_t rss_types)
 	/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
 	 * it is the same case as none of them are added.
 	 */
-	mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
-	if (mask == ETH_RSS_L2_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
 		inset &= ~I40E_INSET_DMAC;
-	else if (mask == ETH_RSS_L2_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
 		inset &= ~I40E_INSET_SMAC;
 
-	mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
-	if (mask == ETH_RSS_L3_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
 		inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
-	else if (mask == ETH_RSS_L3_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
 		inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
 
-	mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-	if (mask == ETH_RSS_L4_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
 		inset &= ~I40E_INSET_DST_PORT;
-	else if (mask == ETH_RSS_L4_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
 		inset &= ~I40E_INSET_SRC_PORT;
 
 	if (rss_types & I40E_HASH_L4_TYPES) {
 		uint64_t l3_mask = rss_types &
-				   (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+				   (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 		uint64_t l4_mask = rss_types &
-				   (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+				   (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 		if (l3_mask && !l4_mask)
 			inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
@@ -836,7 +836,7 @@ i40e_hash_config(struct i40e_pf *pf,
 
 	/* Update lookup table */
 	if (rss_info->queue_num > 0) {
-		uint8_t lut[ETH_RSS_RETA_SIZE_512];
+		uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 		uint32_t i, j = 0;
 
 		for (i = 0; i < hw->func_caps.rss_table_size; i++) {
@@ -943,7 +943,7 @@ i40e_hash_parse_queues(const struct rte_eth_dev *dev,
 			    "RSS key is ignored when queues specified");
 
 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		max_queue = i40e_pf_calc_configured_queues_num(pf);
 	else
 		max_queue = pf->dev_data->nb_rx_queues;
@@ -1081,22 +1081,22 @@ i40e_hash_validate_rss_types(uint64_t rss_types)
 	uint64_t type, mask;
 
 	/* Validate L2 */
-	type = ETH_RSS_ETH & rss_types;
-	mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+	type = RTE_ETH_RSS_ETH & rss_types;
+	mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L3 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-	       ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
-	       ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
-	mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+	       RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+	       RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+	mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L4 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
-	mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+	mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
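For context on how these bits reach the driver: an application requests the
hash fields with the renamed RTE_ETH_RSS_* flags at configure time, and the
SRC_ONLY/DST_ONLY attributes map to the input-set trimming done in
i40e_hash_get_inset() above. A minimal sketch, assuming a port that supports
RSS (port id and queue counts are placeholders):

#include <rte_ethdev.h>

/* Hash TCP/IPv4 flows on the source address only; the driver then
 * clears the destination-address input-set bit as shown above.
 */
static int
setup_rss(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			.rss_key = NULL,	/* keep the default key */
			.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_L3_SRC_ONLY,
		},
	};

	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}
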
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index e2d8b2b5f7f1..ccb3924a5f68 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1207,24 +1207,24 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 	event.event_data.link_event.link_status =
 		dev->data->dev_link.link_status;
 
-	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+	/* need to convert the RTE_ETH_SPEED_NUM_xxx into VIRTCHNL_LINK_SPEED_xxx */
 	switch (dev->data->dev_link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	default:
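
On the application side the same constants come back from rte_eth_link_get();
a short sketch of consuming them (error handling trimmed):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full" : "half");
	else
		printf("port %u: down\n", port_id);
}
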
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 554b1142c136..a13bb81115f4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1329,7 +1329,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	for (i = 0; i < tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		if (k) {
 			for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
 				for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
@@ -1995,7 +1995,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2243,7 +2243,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 	}
 	/* check simple tx conflict */
 	if (ad->tx_simple_allowed) {
-		if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+		if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
 				txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
 			PMD_DRV_LOG(ERR, "No-simple tx is required.");
 			return -EINVAL;
@@ -3417,7 +3417,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		 txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
 	ad->tx_vec_allowed = (ad->tx_simple_allowed &&
 			txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2301e6301d7d..5e6eecc50116 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -120,7 +120,7 @@ struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
-	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -166,7 +166,7 @@ struct i40e_tx_queue {
 	bool q_set; /**< indicate if tx queue has been configured */
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
-	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
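The fast-free checks in the Tx paths above are driven by a per-port (or
per-queue) offload request; a sketch, assuming the capability has already
been read with rte_eth_dev_info_get():

#include <rte_ethdev.h>

/* RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE is only safe when all mbufs of a
 * queue come from a single mempool and have a reference count of one.
 */
static void
request_fast_free(struct rte_eth_conf *conf,
		  const struct rte_eth_dev_info *info)
{
	if (info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
}
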
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 4ffe030fcb64..7abc0821d119 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -900,7 +900,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index f52e3c567558..f9a7f4655050 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -100,7 +100,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	  */
 	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < n; i++) {
 			free[i] = txep[i].mbuf;
 			txep[i].mbuf = NULL;
@@ -211,7 +211,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct i40e_rx_queue *rxq;
 	uint16_t desc, i;
 	bool first_queue;
@@ -221,11 +221,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	 /* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	/* no QinQ support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 
 	/**
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 12d5a2e48a9b..663c46b91dc5 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -42,30 +42,30 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
 		sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -385,19 +385,19 @@ i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
 		return -EINVAL;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			return i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_STRIP)
+		    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			return i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_stripping(vsi, FALSE);
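
The RTE_ETH_VLAN_*_MASK bits handled here are passed in by the ethdev layer
when an application toggles the offloads; a sketch of the calling side (the
*_OFFLOAD bits are the current settings, the driver sees the *_MASK of what
changed):

#include <rte_ethdev.h>

static int
enable_vlan_strip_and_filter(uint16_t port_id)
{
	int cur = rte_eth_dev_get_vlan_offload(port_id);

	if (cur < 0)
		return cur;

	cur |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, cur);
}
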
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 34bfa9af4734..12f541f53926 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -50,18 +50,18 @@
 	VIRTCHNL_VF_OFFLOAD_RX_POLLING)
 
 #define IAVF_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |         \
-	ETH_RSS_NONFRAG_IPV4_TCP |  \
-	ETH_RSS_NONFRAG_IPV4_UDP |  \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |         \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 611f1f7722b0..df44df772e4e 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -266,53 +266,53 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	static const uint64_t map_hena_rss[] = {
 		/* IPv4 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
-				ETH_RSS_NONFRAG_IPV4_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
-				ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
 
 		/* IPv6 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
-				ETH_RSS_NONFRAG_IPV6_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
-				ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
 
 		/* L2 Payload */
-		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
 	};
 
-	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
-				  ETH_RSS_NONFRAG_IPV4_TCP |
-				  ETH_RSS_NONFRAG_IPV4_SCTP |
-				  ETH_RSS_NONFRAG_IPV4_OTHER |
-				  ETH_RSS_FRAG_IPV4;
+	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV4;
 
-	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_NONFRAG_IPV6_SCTP |
-				  ETH_RSS_NONFRAG_IPV6_OTHER |
-				  ETH_RSS_FRAG_IPV6;
+	const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV6;
 
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
@@ -331,13 +331,13 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	/**
-	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
 	 * generalizations of all other IPv4 and IPv6 RSS types.
 	 */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		rss_hf |= ipv4_rss;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		rss_hf |= ipv6_rss;
 
 	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
@@ -363,10 +363,10 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	if (valid_rss_hf & ipv4_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
 
 	if (valid_rss_hf & ipv6_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
 
 	if (rss_hf & ~valid_rss_hf)
 		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
@@ -467,7 +467,7 @@ iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
 		return 0;
 
 	enable = !!(dev->data->dev_conf.txmode.offloads &
-		    DEV_TX_OFFLOAD_VLAN_INSERT);
+		    RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	iavf_config_vlan_insert_v2(adapter, enable);
 
 	return 0;
@@ -479,10 +479,10 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
 	int err;
 
 	err = iavf_dev_vlan_offload_set(dev,
-					ETH_VLAN_STRIP_MASK |
-					ETH_QINQ_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK |
-					ETH_VLAN_EXTEND_MASK);
+					RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_QINQ_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK |
+					RTE_ETH_VLAN_EXTEND_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
 		return err;
@@ -512,8 +512,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_vec_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Large VF setting */
 	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
@@ -611,7 +611,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -961,34 +961,34 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
@@ -1048,42 +1048,42 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (vf->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -1231,14 +1231,14 @@ iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
 	bool enable;
 	int err;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 
 		iavf_iterate_vlan_filters_v2(dev, enable);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		err = iavf_config_vlan_strip_v2(adapter, enable);
 		/* If not support, the stripping is already disabled by PF */
@@ -1267,9 +1267,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			err = iavf_enable_vlan_strip(adapter);
 		else
 			err = iavf_disable_vlan_strip(adapter);
@@ -1311,8 +1311,8 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(lut, vf->rss_lut, reta_size);
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -1348,8 +1348,8 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = vf->rss_lut[i];
 	}
@@ -1556,7 +1556,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
 		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
-					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
 					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
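
The idx/shift walk in the RETA hunks above mirrors how applications fill the
reta_conf array; a sketch that spreads entries round-robin over nb_queues
queues (reta_size would come from rte_eth_dev_info_get()):

#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[reta_size /
					     RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= 1ULL << shift;       /* mark entry valid */
		reta[idx].reta[shift] = i % nb_queues; /* round-robin */
	}

	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
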
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 01724cd569dd..55d8a11da388 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -395,90 +395,90 @@ struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tcp_tmplt = {
 /* rss type super set */
 
 /* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define IAVF_RSS_TYPE_OUTER_IPV4	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6	(ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* VLAN IPV4 */
 #define IAVF_RSS_TYPE_VLAN_IPV4		(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define IAVF_RSS_TYPE_VLAN_IPV6		(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4	ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4	RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6	ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6	RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* GTPU IPv4 */
 #define IAVF_RSS_TYPE_GTPU_IPV4		(IAVF_RSS_TYPE_INNER_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_UDP	(IAVF_RSS_TYPE_INNER_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_TCP	(IAVF_RSS_TYPE_INNER_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define IAVF_RSS_TYPE_GTPU_IPV6		(IAVF_RSS_TYPE_INNER_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_UDP	(IAVF_RSS_TYPE_INNER_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_TCP	(IAVF_RSS_TYPE_INNER_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /**
  * Supported pattern for hash.
@@ -496,7 +496,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv4_udp,		IAVF_RSS_TYPE_VLAN_IPV4_UDP,	&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_tcp,		IAVF_RSS_TYPE_VLAN_IPV4_TCP,	&outer_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_sctp,		IAVF_RSS_TYPE_VLAN_IPV4_SCTP,	&outer_ipv4_sctp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpu,			ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gtpu,			RTE_ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&inner_ipv4_tcp_tmplt},
@@ -538,9 +538,9 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv4_ah,			IAVF_RSS_TYPE_IPV4_AH,		&ipv4_ah_tmplt},
 	{iavf_pattern_eth_ipv4_l2tpv3,			IAVF_RSS_TYPE_IPV4_L2TPV3,	&ipv4_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv4_pfcp,			IAVF_RSS_TYPE_IPV4_PFCP,	&ipv4_pfcp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpc,			ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
-	{iavf_pattern_eth_ecpri,			ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
-	{iavf_pattern_eth_ipv4_ecpri,			ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_gtpc,			RTE_ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ecpri,			RTE_ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_ecpri,			RTE_ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -565,7 +565,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv6_udp,		IAVF_RSS_TYPE_VLAN_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_tcp,		IAVF_RSS_TYPE_VLAN_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_sctp,		IAVF_RSS_TYPE_VLAN_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpu,			ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gtpu,			RTE_ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&inner_ipv6_tcp_tmplt},
@@ -607,7 +607,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_ah,			IAVF_RSS_TYPE_IPV6_AH,		&ipv6_ah_tmplt},
 	{iavf_pattern_eth_ipv6_l2tpv3,			IAVF_RSS_TYPE_IPV6_L2TPV3,	&ipv6_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv6_pfcp,			IAVF_RSS_TYPE_IPV6_PFCP,	&ipv6_pfcp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpc,			ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ipv6_gtpc,			RTE_ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -648,52 +648,52 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 	struct virtchnl_rss_cfg rss_cfg;
 
 #define IAVF_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		rss_cfg.proto_hdrs = inner_ipv4_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		rss_cfg.proto_hdrs = inner_ipv6_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
@@ -855,28 +855,28 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 		hdr = &proto_hdrs->proto_hdr[i];
 		switch (hdr->type) {
 		case VIRTCHNL_PROTO_HDR_ETH:
-			if (!(rss_type & ETH_RSS_ETH))
+			if (!(rss_type & RTE_ETH_RSS_ETH))
 				hdr->field_selector = 0;
-			else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_DST);
-			else if (rss_type & ETH_RSS_L2_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_SRC);
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4) {
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 					iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
-				} else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				}
@@ -884,39 +884,39 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4)
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
 					REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6:
 			if (rss_type &
-			    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV6_TCP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			    (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				}
@@ -933,7 +933,7 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
 			else
 				hdr->field_selector = 0;
@@ -941,87 +941,87 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_UDP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, UDP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_TCP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, TCP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_SCTP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_SCTP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, SCTP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_S_VLAN:
-			if (!(rss_type & ETH_RSS_S_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_S_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_C_VLAN:
-			if (!(rss_type & ETH_RSS_C_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_C_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_L2TPV3:
-			if (!(rss_type & ETH_RSS_L2TPV3))
+			if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ESP:
-			if (!(rss_type & ETH_RSS_ESP))
+			if (!(rss_type & RTE_ETH_RSS_ESP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_AH:
-			if (!(rss_type & ETH_RSS_AH))
+			if (!(rss_type & RTE_ETH_RSS_AH))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_PFCP:
-			if (!(rss_type & ETH_RSS_PFCP))
+			if (!(rss_type & RTE_ETH_RSS_PFCP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ECPRI:
-			if (!(rss_type & ETH_RSS_ECPRI))
+			if (!(rss_type & RTE_ETH_RSS_ECPRI))
 				hdr->field_selector = 0;
 			break;
 		default:
@@ -1038,7 +1038,7 @@ iavf_refine_proto_hdrs_gtpu(struct virtchnl_proto_hdrs *proto_hdrs,
 	struct virtchnl_proto_hdr *hdr;
 	int i;
 
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	for (i = 0; i < proto_hdrs->count; i++) {
@@ -1163,10 +1163,10 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -1177,27 +1177,27 @@ struct rss_attr_type {
 	uint64_t type;
 };
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE64)
 
 #define INVALID_RSS_ATTR	(RTE_ETH_RSS_L3_PRE32	| \
@@ -1207,9 +1207,9 @@ struct rss_attr_type {
 				 RTE_ETH_RSS_L3_PRE96)
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* current ipv6 prefix only supports prefix 64 bits*/
 	{RTE_ETH_RSS_L3_PRE64,				VALID_RSS_IPV6},
 	{INVALID_RSS_ATTR,				0}
@@ -1226,15 +1226,15 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88bbd40c1027..ac4db117f5cd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -617,7 +617,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->vsi = vsi;
 	rxq->offloads = offloads;
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f4ae2fd6e123..2d7f6b1b2dca 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -24,22 +24,22 @@
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_QINQ_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		DEV_RX_OFFLOAD_CHECKSUM |		 \
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_RX_OFFLOAD_VLAN |		 \
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_VLAN |		 \
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
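
These three masks drive burst-path selection. A simplified sketch of
how such masks are typically consulted (assumes this header's macros
are in scope; not the driver's exact selection logic):

/* Any "no vector" offload forces the scalar path; queues that need
 * the remaining offloads take the offload-capable vector path.
 */
static int
iavf_pick_tx_path(uint64_t offloads)
{
	if (offloads & IAVF_TX_NO_VECTOR_FLAGS)
		return -1;	/* fall back to the scalar burst function */
	if (offloads & IAVF_TX_VECTOR_OFFLOAD)
		return IAVF_VECTOR_OFFLOAD_PATH;
	return IAVF_VECTOR_PATH;
}
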
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 72a4fcab04a5..b47c51b8ebe4 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -906,7 +906,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 		    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
@@ -958,7 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					(_mm256_castsi128_si256(raw_desc_bh0),
 					raw_desc_bh1, 1);
 
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 12375d3d80bd..b8f2f69f12fc 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1141,7 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1193,7 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 						(_mm256_castsi128_si256(raw_desc_bh0),
 						 raw_desc_bh1, 1);
 
-				if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+				if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 					/**
 					 * to shift the 32b RSS hash value to the
 					 * highest 32b of each 128b before mask
@@ -1721,7 +1721,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
 								rte_lcore_id());
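
The fast-free branch above relies on the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
contract: every transmitted mbuf comes from a single mempool and has a
reference count of one, so completed buffers can be returned to the
mempool cache in bulk. A hedged sketch of an application opting in
(descriptor count and queue id are placeholders; per-queue offload
capability rules still apply):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_txq_fast_free(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		return -ENOTSUP;

	txconf = dev_info.default_txconf;
	txconf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
				      rte_socket_id(), &txconf);
}
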
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index edb54991e298..1de43b9b8ee2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -819,7 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index c9c01a14e349..7b7df5eebb6d 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -835,7 +835,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
 		/* set all lut items to default queue */
 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
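
The mq_mode test above matches the usual application-side RSS
enablement; a minimal configuration sketch (hash types chosen
arbitrarily for illustration):

#include <rte_ethdev.h>

static const struct rte_eth_conf rss_port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* let the PMD choose a key */
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
				  RTE_ETH_RSS_TCP,
		},
	},
};
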
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index ebd8ca57ef5f..1cda2db00e56 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -95,7 +95,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -582,7 +582,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -644,7 +644,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 	hw->tm_conf.committed = false;
 
@@ -660,8 +660,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	return 0;
 }
@@ -683,27 +683,27 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -933,42 +933,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (hw->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = hw->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -987,11 +987,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
 					udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
 					udp_tunnel->udp_port);
 		break;
@@ -1018,8 +1018,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
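
For reference, the tunnel-type switches above are reached through
rte_eth_dev_udp_tunnel_port_add()/_del(); a hedged caller sketch
(4789 is the IANA VXLAN port, the port id is a placeholder):

#include <rte_ethdev.h>

static int
add_vxlan_udp_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
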
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 44fb38dbe7b1..b9fcfc80ad9b 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -37,7 +37,7 @@ ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -45,7 +45,7 @@ ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -143,28 +143,28 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -246,9 +246,9 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		bool enable = !!(dev_conf->rxmode.offloads &
-				 DEV_RX_OFFLOAD_VLAN_STRIP);
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (enable && repr->outer_vlan_info.port_vlan_ena) {
 			PMD_DRV_LOG(ERR,
@@ -345,7 +345,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 	if (!ice_dcf_vlan_offload_ena(repr))
 		return -ENOTSUP;
 
-	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer VLAN in QinQ\n");
 		return -EINVAL;
@@ -375,7 +375,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 
 	if (repr->outer_vlan_info.stripping_ena) {
 		err = ice_dcf_vf_repr_vlan_offload_set(dev,
-						       ETH_VLAN_STRIP_MASK);
+						       RTE_ETH_VLAN_STRIP_MASK);
 		if (err) {
 			PMD_DRV_LOG(ERR,
 				    "Failed to reset VLAN stripping : %d\n",
@@ -449,7 +449,7 @@ ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
 	int err;
 
 	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
-					       ETH_VLAN_STRIP_MASK);
+					       RTE_ETH_VLAN_STRIP_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
 		return err;
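
The representor toggles stripping through the same mask-based callback
that applications reach via rte_eth_dev_set_vlan_offload(). Note that
applications pass RTE_ETH_VLAN_*_OFFLOAD bits, while the PMD callback
receives RTE_ETH_VLAN_*_MASK bits describing what changed. A minimal
sketch of enabling stripping from the application side:

#include <rte_ethdev.h>

static int
enable_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;	/* negative errno from ethdev */
	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
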
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index edbc74632711..6a6637a15af7 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1487,9 +1487,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	TAILQ_INIT(&vsi->mac_list);
 	TAILQ_INIT(&vsi->vlan_list);
 
-	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+	/* Keep in sync with the RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
 			hw->func_caps.common_cap.rss_table_size;
 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -2993,14 +2993,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	int ret;
 
 #define ICE_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
 	if (ret)
@@ -3010,7 +3010,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	cfg.symm = 0;
 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 	/* Configure RSS for IPv4 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3020,7 +3020,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for IPv6 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3030,7 +3030,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3041,7 +3041,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3052,7 +3052,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3063,7 +3063,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3074,7 +3074,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3085,7 +3085,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3095,7 +3095,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3105,7 +3105,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3115,7 +3115,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3125,7 +3125,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3135,7 +3135,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3145,7 +3145,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3288,8 +3288,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_rx_queues) {
 		ret = ice_init_rss(pf);
@@ -3569,8 +3569,8 @@ ice_dev_start(struct rte_eth_dev *dev)
 	ice_set_rx_function(dev);
 	ice_set_tx_function(dev);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = ice_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3682,40 +3682,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->flow_type_rss_offloads = 0;
 
 	if (!is_safe_mode) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM |
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_QINQ_STRIP |
-			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH |
-			DEV_RX_OFFLOAD_TIMESTAMP;
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH |
+			RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_QINQ_INSERT |
-			DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM |
-			DEV_TX_OFFLOAD_SCTP_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
 	dev_info->rx_queue_offload_capa = 0;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3754,24 +3754,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_align = ICE_ALIGN_RING_DESC,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			       ETH_LINK_SPEED_100M |
-			       ETH_LINK_SPEED_1G |
-			       ETH_LINK_SPEED_2_5G |
-			       ETH_LINK_SPEED_5G |
-			       ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_20G |
-			       ETH_LINK_SPEED_25G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			       RTE_ETH_LINK_SPEED_100M |
+			       RTE_ETH_LINK_SPEED_1G |
+			       RTE_ETH_LINK_SPEED_2_5G |
+			       RTE_ETH_LINK_SPEED_5G |
+			       RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_20G |
+			       RTE_ETH_LINK_SPEED_25G;
 
 	phy_type_low = hw->port_info->phy.phy_type_low;
 	phy_type_high = hw->port_info->phy.phy_type_high;
 
 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3836,8 +3836,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
 					      &link_status, NULL);
 		if (status != ICE_SUCCESS) {
-			link.link_speed = ETH_SPEED_NUM_100M;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_100M;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			goto out;
 		}
@@ -3853,55 +3853,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		goto out;
 
 	/* Full-duplex operation at all supported speeds */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case ICE_AQ_LINK_SPEED_10MB:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case ICE_AQ_LINK_SPEED_100MB:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case ICE_AQ_LINK_SPEED_1000MB:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case ICE_AQ_LINK_SPEED_2500MB:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_5GB:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_10GB:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case ICE_AQ_LINK_SPEED_20GB:
-		link.link_speed = ETH_SPEED_NUM_20G;
+		link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case ICE_AQ_LINK_SPEED_25GB:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case ICE_AQ_LINK_SPEED_40GB:
-		link.link_speed = ETH_SPEED_NUM_40G;
+		link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case ICE_AQ_LINK_SPEED_50GB:
-		link.link_speed = ETH_SPEED_NUM_50G;
+		link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case ICE_AQ_LINK_SPEED_100GB:
-		link.link_speed = ETH_SPEED_NUM_100G;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case ICE_AQ_LINK_SPEED_UNKNOWN:
 		PMD_DRV_LOG(ERR, "Unknown link speed");
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "None link speed");
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 
 out:
 	ice_atomic_write_link_status(dev, &link);
@@ -4377,15 +4377,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ice_vsi_config_vlan_filter(vsi, true);
 		else
 			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ice_vsi_config_vlan_stripping(vsi, true);
 		else
 			ice_vsi_config_vlan_stripping(vsi, false);
@@ -4500,8 +4500,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4550,8 +4550,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -5460,7 +5460,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
 		break;
 	default:
@@ -5484,7 +5484,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
@@ -5505,7 +5505,7 @@ ice_timesync_enable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_TIMESTAMP)) {
+	    RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
 		return -1;
 	}
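
The idx/shift arithmetic in the RETA hunks above follows from the
redirection table being grouped 64 entries per
rte_eth_rss_reta_entry64. A hedged application sketch spreading the
table across nb_queues (assumes reta_size is a non-zero multiple of
RTE_ETH_RETA_GROUP_SIZE):

#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[reta_size / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		/* select the entry and point it at a queue round-robin */
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
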
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1cd3753ccc5f..599e0028f7e8 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -117,19 +117,19 @@
 		       ICE_FLAG_VF_MAC_BY_PF)
 
 #define ICE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /**
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 20a3204fab7e..35eff8b17d28 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -39,27 +39,27 @@
 #define ICE_IPV4_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
 #define ICE_IPV6_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE32	| \
 				 RTE_ETH_RSS_L3_PRE48	| \
 				 RTE_ETH_RSS_L3_PRE64)
@@ -373,87 +373,87 @@ struct ice_rss_hash_cfg eth_tmplt = {
 };
 
 /* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4		(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define ICE_RSS_TYPE_ETH_IPV4		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_UDP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_TCP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_SCTP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV4		ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV4		RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 /* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6		(ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(ETH_RSS_ETH | ETH_RSS_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_ETH_IPV6_UDP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_TCP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_SCTP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV6		ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV6		RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* VLAN IPV4 */
 #define ICE_RSS_TYPE_VLAN_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV4)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_VLAN_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_SCTP	(ICE_RSS_TYPE_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define ICE_RSS_TYPE_VLAN_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_FRAG	(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_VLAN_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_SCTP	(ICE_RSS_TYPE_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 
 /* GTPU IPv4 */
 #define ICE_RSS_TYPE_GTPU_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define ICE_RSS_TYPE_GTPU_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 
 /* PPPOE */
-#define ICE_RSS_TYPE_PPPOE		(ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
 
 /* PPPOE IPv4 */
 #define ICE_RSS_TYPE_PPPOE_IPV4		(ICE_RSS_TYPE_IPV4 | \
@@ -472,17 +472,17 @@ struct ice_rss_hash_cfg eth_tmplt = {
 					 ICE_RSS_TYPE_PPPOE)
 
 /* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /* MAC */
-#define ICE_RSS_TYPE_ETH		ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH		RTE_ETH_RSS_ETH
 
 /**
  * Supported pattern for hash.
@@ -647,86 +647,86 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
-		if (!(rss_type & ETH_RSS_ETH))
+		if (!(rss_type & RTE_ETH_RSS_ETH))
 			*hash_flds &= ~ICE_FLOW_HASH_ETH;
-		if (rss_type & ETH_RSS_L2_SRC_ONLY)
+		if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
-		else if (rss_type & ETH_RSS_L2_DST_ONLY)
+		else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
 		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
-		if (rss_type & ETH_RSS_ETH)
+		if (rss_type & RTE_ETH_RSS_ETH)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
-		if (rss_type & ETH_RSS_C_VLAN)
+		if (rss_type & RTE_ETH_RSS_C_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
-		else if (rss_type & ETH_RSS_S_VLAN)
+		else if (rss_type & RTE_ETH_RSS_S_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
-		if (!(rss_type & ETH_RSS_PPPOE))
+		if (!(rss_type & RTE_ETH_RSS_PPPOE))
 			*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 		if (rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		    ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV4) {
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 				*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
 				*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 			}
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		}
 
-		if (rss_type & ETH_RSS_IPV4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 		if (rss_type &
-		   (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+		   (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		}
 
 		if (rss_type & RTE_ETH_RSS_L3_PRE32) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
 			} else {
@@ -735,10 +735,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE48) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
 			} else {
@@ -747,10 +747,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE64) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
 			} else {
@@ -762,81 +762,81 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV6_UDP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV6_TCP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_SCTP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
-		if (!(rss_type & ETH_RSS_L2TPV3))
+		if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 			*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
-		if (!(rss_type & ETH_RSS_ESP))
+		if (!(rss_type & RTE_ETH_RSS_ESP))
 			*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
-		if (!(rss_type & ETH_RSS_AH))
+		if (!(rss_type & RTE_ETH_RSS_AH))
 			*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
-		if (!(rss_type & ETH_RSS_PFCP))
+		if (!(rss_type & RTE_ETH_RSS_PFCP))
 			*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
 	}
 }
@@ -870,7 +870,7 @@ ice_refine_hash_cfg_gtpu(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
@@ -892,10 +892,10 @@ static void ice_refine_hash_cfg(struct ice_rss_hash_cfg *hash_cfg,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -907,9 +907,9 @@ struct rss_attr_type {
 };
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* current IPv6 prefix only supports prefix lengths up to 64 bits */
 	{RTE_ETH_RSS_L3_PRE32,				VALID_RSS_IPV6},
 	{RTE_ETH_RSS_L3_PRE48,				VALID_RSS_IPV6},
@@ -928,16 +928,16 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9f5..8406240d7209 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -303,7 +303,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 		}
 	}
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		/* Register mbuf field and flag for Rx timestamp */
 		err = rte_mbuf_dyn_rx_timestamp_register(
 				&ice_timestamp_dynfield_offset,
@@ -367,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -1117,7 +1117,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1624,7 +1624,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-			if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 				ts_ns = ice_tstamp_convert_32b_64b(hw,
 					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
 				if (ice_timestamp_dynflag > 0) {
@@ -1942,7 +1942,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2373,7 +2373,7 @@ ice_recv_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2889,7 +2889,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
 	for (i = 0; i < txq->tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
 			txep->mbuf = NULL;
@@ -3365,7 +3365,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		(txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
 
 	if (ad->tx_simple_allowed)
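
The tx_simple_allowed expression above uses the "x == (x & mask)"
idiom to assert that no offload bit outside MBUF_FAST_FREE is set; an
equivalent, arguably clearer, form for readers:

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

static inline bool
only_mbuf_fast_free(uint64_t offloads)
{
	/* true when no Tx offload other than fast free is requested */
	return (offloads & ~(uint64_t)RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) == 0;
}
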
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 490693bff218..86955539bea8 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -474,7 +474,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 7efe7b50a206..af23f6a34e58 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -585,7 +585,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -995,7 +995,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index f0f99265857e..b1d975b31a5a 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -248,23 +248,23 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 }
 
 #define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define ICE_RX_VECTOR_OFFLOAD (				\
-		DEV_RX_OFFLOAD_CHECKSUM |		\
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_RX_OFFLOAD_VLAN |			\
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_RX_OFFLOAD_VLAN |			\
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define ICE_VECTOR_PATH		0
 #define ICE_VECTOR_OFFLOAD_PATH	1
@@ -287,7 +287,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		return -1;
 
 	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
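
Worth noting for this rename: RTE_ETH_RX_OFFLOAD_CHECKSUM and
RTE_ETH_RX_OFFLOAD_VLAN used above are composite macros, so
ICE_RX_VECTOR_OFFLOAD expands to the individual flags. Roughly as in
rte_ethdev.h (paraphrased; see the header for the authoritative
definitions):

#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM)

#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
				 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
				 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
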
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 653bd28b417c..117494131f32 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -479,7 +479,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 2a1ed90b641b..7ce80a442b35 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -307,8 +307,8 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		rx_mq_mode != ETH_MQ_RX_RSS) {
+	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 		/* RSS together with VMDq not supported*/
 		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				rx_mq_mode);
@@ -318,7 +318,7 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 	/* To not break software that sets an invalid mode, only display
 	 * a warning when an invalid mode is used.
 	 */
-	if (tx_mq_mode != ETH_MQ_TX_NONE)
+	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
 		PMD_INIT_LOG(WARNING,
 			"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
 			tx_mq_mode);
@@ -334,8 +334,8 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	ret  = igc_check_mq_mode(dev);
 	if (ret != 0)
@@ -473,12 +473,12 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 		if (speed == SPEED_2500) {
 			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
@@ -490,9 +490,9 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		}
 	} else {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -525,7 +525,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
 				" Port %d: Link Up - speed %u Mbps - %s",
 				dev->data->port_id,
 				(unsigned int)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				"full-duplex" : "half-duplex");
 		else
 			PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -972,18 +972,18 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	/* VLAN Offload Settings */
 	eth_igc_vlan_offload_set(dev,
-		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+		RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
 		hw->mac.autoneg = 1;
 	} else {
 		int num_speeds = 0;
 
-		if (*speeds & ETH_LINK_SPEED_FIXED) {
+		if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
 			PMD_DRV_LOG(ERR,
 				    "Force speed mode currently not supported");
 			igc_dev_clear_queues(dev);
@@ -993,33 +993,33 @@ eth_igc_start(struct rte_eth_dev *dev)
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.autoneg = 1;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_2_5G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
 			num_speeds++;
 		}
@@ -1482,14 +1482,14 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
-	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_vmdq_pools = 0;
 
 	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1515,9 +1515,9 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2141,13 +2141,13 @@ eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
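For reference, a minimal sketch of the application-side counterpart that reads these renamed RTE_ETH_FC_* values back (port id illustrative, not from this patch):

    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Sketch: query the current flow-control mode of a port. */
    static void
    print_fc_mode(uint16_t port_id)
    {
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
            return;

        switch (fc_conf.mode) {
        case RTE_ETH_FC_FULL:
            printf("port %u: rx+tx pause\n", port_id);
            break;
        case RTE_ETH_FC_RX_PAUSE:
            printf("port %u: rx pause only\n", port_id);
            break;
        case RTE_ETH_FC_TX_PAUSE:
            printf("port %u: tx pause only\n", port_id);
            break;
        default:
            printf("port %u: no flow control\n", port_id);
        }
    }
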
@@ -2179,16 +2179,16 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->fc.requested_mode = igc_fc_none;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->fc.requested_mode = igc_fc_rx_pause;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->fc.requested_mode = igc_fc_tx_pause;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->fc.requested_mode = igc_fc_full;
 		break;
 	default:
@@ -2234,29 +2234,29 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* set redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta, reg;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to update the register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* check mask whether need to read the register value first */
@@ -2290,29 +2290,29 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* read redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to read register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* read register and get the queue index */
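The idx/shift arithmetic above mirrors how applications fill struct rte_eth_rss_reta_entry64, which packs RTE_ETH_RETA_GROUP_SIZE (64) entries per group. A minimal application-side sketch, assuming the device reports a 128-entry table and using the names introduced by this patch (helper name and error handling are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Sketch: spread a 128-entry redirection table across nb_q queues. */
    static int
    spread_reta(uint16_t port_id, uint16_t nb_q)
    {
        struct rte_eth_rss_reta_entry64 reta_conf[
            RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
            uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
            uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

            reta_conf[idx].mask |= UINT64_C(1) << shift;
            reta_conf[idx].reta[shift] = i % nb_q;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                RTE_ETH_RSS_RETA_SIZE_128);
    }
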
@@ -2369,23 +2369,23 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_hf = 0;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 
 	rss_conf->rss_hf |= rss_hf;
 	return 0;
@@ -2514,22 +2514,22 @@ eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igc_vlan_hw_strip_enable(dev);
 		else
 			igc_vlan_hw_strip_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igc_vlan_hw_filter_enable(dev);
 		else
 			igc_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			return igc_vlan_hw_extend_enable(dev);
 		else
 			return igc_vlan_hw_extend_disable(dev);
@@ -2547,7 +2547,7 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 	uint32_t reg_val;
 
 	/* only the outer TPID of a double VLAN can be configured */
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg_val = IGC_READ_REG(hw, IGC_VET);
 		reg_val = (reg_val & (~IGC_VET_EXT)) |
 			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index 5e6c2ff30157..f56cad79e939 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -66,37 +66,37 @@ extern "C" {
 #define IGC_TX_MAX_MTU_SEG	UINT8_MAX
 
 #define IGC_RX_OFFLOAD_ALL	(    \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_VLAN_FILTER | \
-	DEV_RX_OFFLOAD_VLAN_EXTEND | \
-	DEV_RX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_RX_OFFLOAD_UDP_CKSUM   | \
-	DEV_RX_OFFLOAD_TCP_CKSUM   | \
-	DEV_RX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_RX_OFFLOAD_KEEP_CRC    | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
-	DEV_TX_OFFLOAD_VLAN_INSERT | \
-	DEV_TX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_TX_OFFLOAD_UDP_CKSUM   | \
-	DEV_TX_OFFLOAD_TCP_CKSUM   | \
-	DEV_TX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_TX_OFFLOAD_TCP_TSO     | \
-	DEV_TX_OFFLOAD_UDP_TSO	   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO     | \
+	RTE_ETH_TX_OFFLOAD_UDP_TSO	   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define IGC_RSS_OFFLOAD_ALL	(    \
-	ETH_RSS_IPV4               | \
-	ETH_RSS_NONFRAG_IPV4_TCP   | \
-	ETH_RSS_NONFRAG_IPV4_UDP   | \
-	ETH_RSS_IPV6               | \
-	ETH_RSS_NONFRAG_IPV6_TCP   | \
-	ETH_RSS_NONFRAG_IPV6_UDP   | \
-	ETH_RSS_IPV6_EX            | \
-	ETH_RSS_IPV6_TCP_EX        | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4               | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP   | \
+	RTE_ETH_RSS_IPV6               | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP   | \
+	RTE_ETH_RSS_IPV6_EX            | \
+	RTE_ETH_RSS_IPV6_TCP_EX        | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IGC_MAX_ETQF_FILTERS		3	/* etqf(3) is used for 1588 */
 #define IGC_ETQF_FILTER_1588		3
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index 56132e8c6cd6..1d34ae2e1b15 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -127,7 +127,7 @@ struct igc_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /** Offload features */
@@ -209,7 +209,7 @@ struct igc_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 static inline uint64_t
@@ -847,23 +847,23 @@ igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
 }
@@ -1037,10 +1037,10 @@ igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		igc_rss_configure(dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/*
 		 * configure RSS register for following,
 		 * then disable the RSS logic
@@ -1111,7 +1111,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+		rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				RTE_ETHER_CRC_LEN : 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -1177,7 +1177,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	if (dev->data->scattered_rx) {
@@ -1221,20 +1221,20 @@ igc_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= IGC_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= IGC_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_IPOFL;
 
 	if (offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rxcsum |= IGC_RXCSUM_TUOFL;
-		offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
 	} else {
 		rxcsum &= ~IGC_RXCSUM_TUOFL;
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
 		rxcsum |= IGC_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_CRCOFL;
@@ -1242,7 +1242,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1279,12 +1279,12 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
 		dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			dvmolr |= IGC_DVMOLR_STRVLAN;
 		else
 			dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-		if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			dvmolr &= ~IGC_DVMOLR_STRCRC;
 		else
 			dvmolr |= IGC_DVMOLR_STRCRC;
@@ -2253,10 +2253,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 	reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
 	if (on) {
 		reg_val |= IGC_DVMOLR_STRVLAN;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
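For reference, the application-side call that lands in eth_igc_vlan_strip_queue_set() above; a minimal sketch with illustrative port and queue ids:

    #include <rte_ethdev.h>

    /* Sketch: enable VLAN stripping on a single Rx queue. */
    static int
    enable_queue_vlan_strip(uint16_t port_id, uint16_t rx_queue_id)
    {
        return rte_eth_dev_set_vlan_strip_on_queue(port_id, rx_queue_id, 1);
    }
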
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index f94a1fed0a38..c688c3735c06 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -280,37 +280,37 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(link));
 
 	if (adapter->idev.port_info->config.an_enable) {
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	}
 
 	if (!adapter->link_up ||
 	    !(lif->state & IONIC_LIF_F_UP)) {
 		/* Interface is down */
-		link.link_status = ETH_LINK_DOWN;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else {
 		/* Interface is up */
-		link.link_status = ETH_LINK_UP;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		switch (adapter->link_speed) {
 		case  10000:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case  25000:
-			link.link_speed = ETH_SPEED_NUM_25G;
+			link.link_speed = RTE_ETH_SPEED_NUM_25G;
 			break;
 		case  40000:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case  50000:
-			link.link_speed = ETH_SPEED_NUM_50G;
+			link.link_speed = RTE_ETH_SPEED_NUM_50G;
 			break;
 		case 100000:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -387,17 +387,17 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
 
 	dev_info->speed_capa =
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	/*
 	 * Per-queue capabilities
 	 * RTE does not support disabling a feature on a queue if it is
 	 * enabled globally on the device. Thus the driver does not advertise
-	 * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+	 * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
 	 * though the driver would be otherwise capable of disabling it on
 	 * a per-queue basis.
 	 */
@@ -411,24 +411,24 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	 */
 
 	dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
 		0;
 
 	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
 		0;
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -463,9 +463,9 @@ ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		fc_conf->autoneg = 0;
 
 		if (idev->port_info->config.pause_type)
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -487,14 +487,14 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
 		break;
-	case RTE_FC_RX_PAUSE:
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		return -ENOTSUP;
 	}
 
@@ -545,12 +545,12 @@ ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = tbl_sz / RTE_RETA_GROUP_SIZE;
+	num = tbl_sz / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if (reta_conf[i].mask & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -585,12 +585,12 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-			&lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
-			RTE_RETA_GROUP_SIZE);
+			&lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+			RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -618,17 +618,17 @@ ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			IONIC_RSS_HASH_KEY_SIZE);
 
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -660,17 +660,17 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (!lif->rss_ind_tbl)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 			rss_types |= IONIC_RSS_TYPE_IPV4;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
-		if (rss_conf->rss_hf & ETH_RSS_IPV6)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 			rss_types |= IONIC_RSS_TYPE_IPV6;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
 
 		ionic_lif_rss_config(lif, rss_types, key, NULL);
@@ -842,15 +842,15 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
 static inline uint32_t
 ionic_parse_link_speeds(uint16_t link_speeds)
 {
-	if (link_speeds & ETH_LINK_SPEED_100G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 		return 100000;
-	else if (link_speeds & ETH_LINK_SPEED_50G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 		return 50000;
-	else if (link_speeds & ETH_LINK_SPEED_40G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		return 40000;
-	else if (link_speeds & ETH_LINK_SPEED_25G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		return 25000;
-	else if (link_speeds & ETH_LINK_SPEED_10G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		return 10000;
 	else
 		return 0;
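The helper above converts between the two speed-constant families: RTE_ETH_LINK_SPEED_* values are bit flags requested through dev_conf.link_speeds, while RTE_ETH_SPEED_NUM_* are plain Mbps numbers reported back in rte_eth_link.link_speed. A minimal sketch of the requesting side (helper name, port id and queue counts are illustrative):

    #include <rte_ethdev.h>

    /* Sketch: force a fixed 25G link; drop RTE_ETH_LINK_SPEED_FIXED to
     * autonegotiate among the advertised flags instead.
     */
    static int
    configure_fixed_25g(uint16_t port_id)
    {
        struct rte_eth_conf conf = { 0 };

        conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_25G;
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
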
@@ -874,12 +874,12 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	IONIC_PRINT_CALL();
 
 	allowed_speeds =
-		ETH_LINK_SPEED_FIXED |
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_FIXED |
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	if (dev_conf->link_speeds & ~allowed_speeds) {
 		IONIC_PRINT(ERR, "Invalid link setting");
@@ -896,7 +896,7 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure link */
-	an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+	an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 	ionic_dev_cmd_port_autoneg(idev, an_enable);
 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
diff --git a/drivers/net/ionic/ionic_ethdev.h b/drivers/net/ionic/ionic_ethdev.h
index 6cbcd0f825a3..652f28c97d57 100644
--- a/drivers/net/ionic/ionic_ethdev.h
+++ b/drivers/net/ionic/ionic_ethdev.h
@@ -8,12 +8,12 @@
 #include <rte_ethdev.h>
 
 #define IONIC_ETH_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
 	(eth_dev)->data->dev_private)
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index a1f9ce2d81cb..5e8fdf3893ad 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -1688,12 +1688,12 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
 
 	/*
 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
-	 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
 	 */
-	rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
 		else
 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
@@ -1733,19 +1733,19 @@ ionic_lif_configure(struct ionic_lif *lif)
 	/*
 	 * NB: While it is true that RSS_HASH is always enabled on ionic,
 	 *     setting this flag unconditionally causes problems in DTS.
-	 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	 */
 
 	/* RX per-port */
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 		lif->features |= IONIC_ETH_HW_RX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_RX_CSUM;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		lif->features |= IONIC_ETH_HW_RX_SG;
 		lif->eth_dev->data->scattered_rx = 1;
 	} else {
@@ -1754,30 +1754,30 @@ ionic_lif_configure(struct ionic_lif *lif)
 	}
 
 	/* Covers VLAN_STRIP */
-	ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
 
 	/* TX per-port */
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		lif->features |= IONIC_ETH_HW_TX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_CSUM;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
 	else
 		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		lif->features |= IONIC_ETH_HW_TX_SG;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_SG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		lif->features |= IONIC_ETH_HW_TSO;
 		lif->features |= IONIC_ETH_HW_TSO_IPV6;
 		lif->features |= IONIC_ETH_HW_TSO_ECN;
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 4d16a39c6b6d..e3df7c56debe 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -203,11 +203,11 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 		txq->flags |= IONIC_QCQ_F_DEFERRED;
 
 	/* Convert the offload flags into queue flags */
-	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
-	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
-	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
 
 	eth_dev->data->tx_queues[tx_queue_id] = txq;
@@ -743,11 +743,11 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/*
 	 * Note: the interface does not currently support
-	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
 	 * when the adapter will be able to keep the CRC and subtract
 	 * it from the length for all received packets:
 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
-	 *     DEV_RX_OFFLOAD_KEEP_CRC)
+	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 	 *   rxq->crc_len = ETHER_CRC_LEN;
 	 */
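Since ionic does not advertise KEEP_CRC, the safe application-side pattern is to request it only when the port reports the capability; a minimal sketch (helper name and port id illustrative, error handling trimmed):

    #include <rte_ethdev.h>

    /* Sketch: request KEEP_CRC only when the port advertises it. */
    static void
    maybe_keep_crc(uint16_t port_id, struct rte_eth_conf *conf)
    {
        struct rte_eth_dev_info dev_info;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
            return;
        if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
            conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
    }
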
 
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 063a9c6a6f7f..17088585757f 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -50,11 +50,11 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->speed_capa =
 		(hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
-		ETH_LINK_SPEED_10G :
+		RTE_ETH_LINK_SPEED_10G :
 		((hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
-		ETH_LINK_SPEED_25G :
-		ETH_LINK_SPEED_AUTONEG);
+		RTE_ETH_LINK_SPEED_25G :
+		RTE_ETH_LINK_SPEED_AUTONEG);
 
 	dev_info->max_rx_queues  = 1;
 	dev_info->max_tx_queues  = 1;
@@ -67,30 +67,30 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	};
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 
 	dev_info->dev_capa =
@@ -2399,10 +2399,10 @@ ipn3ke_update_link(struct rte_rawdev *rawdev,
 				(uint64_t *)&link_speed);
 	switch (link_speed) {
 	case IFPGA_RAWDEV_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case IFPGA_RAWDEV_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
 		IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
@@ -2460,9 +2460,9 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2518,9 +2518,9 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 46c95425adfb..7fd2c539e002 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1857,7 +1857,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= IXGBE_DMATXCTL_GDV;
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (qinq) {
 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
@@ -1872,7 +1872,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    " by single VLAN");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (qinq) {
 			/* Only the high 16 bits are valid */
 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
@@ -1959,10 +1959,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -2083,7 +2083,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			ctrl |= IXGBE_VLNCTRL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -2100,7 +2100,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 				ctrl |= IXGBE_RXDCTL_VME;
 				on = TRUE;
 			} else {
@@ -2122,17 +2122,17 @@ ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct ixgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -2143,19 +2143,18 @@ ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		ixgbe_vlan_hw_strip_config(dev);
-	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2194,10 +2193,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -2221,18 +2220,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -2242,12 +2241,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if no mq mode is configured, use the default scheme */
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -2256,12 +2255,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -2276,13 +2275,13 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdq+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2291,15 +2290,15 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2308,39 +2307,39 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -2349,7 +2348,7 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB) {
 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
 				PMD_INIT_LOG(ERR,
@@ -2373,8 +2372,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = ixgbe_check_mq_mode(dev);
@@ -2619,15 +2618,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -2704,17 +2703,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |  RTE_ETH_LINK_SPEED_5G |
+			RTE_ETH_LINK_SPEED_10G;
 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-			allowed_speeds = ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+			allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 		break;
 	default:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 	}
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
@@ -2728,7 +2727,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
@@ -2746,17 +2745,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
 		}
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= IXGBE_LINK_SPEED_100_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= IXGBE_LINK_SPEED_10_FULL;
 	}
 
@@ -3832,7 +3831,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB)
 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
 	}
@@ -3842,9 +3841,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
@@ -3883,21 +3882,21 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-		dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 
 	if (hw->mac.type == ixgbe_mac_X540 ||
 	    hw->mac.type == ixgbe_mac_X540_vf ||
 	    hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550_vf) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	}
 	if (hw->mac.type == ixgbe_mac_X550) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 	}
 
 	/* Driver-preferred Rx/Tx parameters */
@@ -3966,9 +3965,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -4211,11 +4210,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	u32 esdp_reg;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -4237,8 +4236,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
 	if (diag != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -4274,37 +4273,37 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case IXGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 
 	case IXGBE_LINK_SPEED_10_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 
 	case IXGBE_LINK_SPEED_100_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case IXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -4521,7 +4520,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -4740,13 +4739,13 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
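
Note: the rx_pause/tx_pause decode above is a two-bit truth table, and the
renamed enum keeps the same member set, so the same mapping could be
written as an indexed lookup (illustrative only, not part of this patch):

	static const enum rte_eth_fc_mode fc_map[2][2] = {
		[0][0] = RTE_ETH_FC_NONE,     [0][1] = RTE_ETH_FC_TX_PAUSE,
		[1][0] = RTE_ETH_FC_RX_PAUSE, [1][1] = RTE_ETH_FC_FULL,
	};

	fc_conf->mode = fc_map[!!rx_pause][!!tx_pause];
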
@@ -5044,8 +5043,8 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5092,8 +5091,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
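
Note: the idx/shift arithmetic relies on RTE_ETH_RETA_GROUP_SIZE (64)
matching the width of rte_eth_rss_reta_entry64.mask. A generic walk over a
redirection table follows the same pattern (sketch, assuming nb_rx_queues
is the active Rx queue count):

	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
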
@@ -5255,22 +5254,22 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -5330,8 +5329,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	ixgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -5568,10 +5567,10 @@ ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports the HW strip feature; others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
+			on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
@@ -5702,12 +5701,12 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 		}
@@ -5721,15 +5720,15 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= IXGBE_VMOLR_AUPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= IXGBE_VMOLR_ROMPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= IXGBE_VMOLR_ROPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= IXGBE_VMOLR_BAM;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= IXGBE_VMOLR_MPE;
 
 	return new_val;
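
Note: the accept flags are OR-able and feed straight into VMOLR; the same
mask is what callers pass to the PMD-specific helper documented later in
this patch (illustrative call, assuming VF 0 on a valid port):

	uint16_t rx_mask = RTE_ETH_VMDQ_ACCEPT_UNTAG |
			   RTE_ETH_VMDQ_ACCEPT_BROADCAST;

	rte_pmd_ixgbe_set_vf_rxmode(port_id, 0, rx_mask, 1);
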
@@ -6724,15 +6723,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = IXGBE_INCVAL_1GB;
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -7143,16 +7142,16 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		return ETH_RSS_RETA_SIZE_512;
+		return RTE_ETH_RSS_RETA_SIZE_512;
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
-		return ETH_RSS_RETA_SIZE_64;
+		return RTE_ETH_RSS_RETA_SIZE_64;
 	case ixgbe_mac_X540_vf:
 	case ixgbe_mac_82599_vf:
 		return 0;
 	default:
-		return ETH_RSS_RETA_SIZE_128;
+		return RTE_ETH_RSS_RETA_SIZE_128;
 	}
 }
 
@@ -7162,10 +7161,10 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		if (reta_idx < ETH_RSS_RETA_SIZE_128)
+		if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
 			return IXGBE_RETA(reta_idx >> 2);
 		else
-			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+			return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
@@ -7221,7 +7220,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -7232,7 +7231,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled*/
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -7256,9 +7255,9 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled*/
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7271,7 +7270,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7524,7 +7523,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -7556,7 +7555,7 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -7653,12 +7652,12 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
 
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
@@ -7690,11 +7689,11 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
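
Note: the tunnel type enum is consumed through
rte_eth_dev_udp_tunnel_port_add(); a caller registering the conventional
VXLAN port would do (sketch):

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
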
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 950fb2d2450c..876b670f2682 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -114,15 +114,15 @@
 #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE    0x0
 
 #define IXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf irq enable mask */
 #define IXGBE_VF_MAXMSIVECTOR           1
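
Note: IXGBE_RSS_OFFLOAD_ALL now composes the namespaced RTE_ETH_RSS_* bits.
Applications typically intersect their request with the advertised set
before configuring (sketch; error handling omitted):

	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = {0};

	rte_eth_dev_info_get(port_id, &info);
	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
	conf.rx_adv_conf.rss_conf.rss_hf =
		RTE_ETH_RSS_IP & info.flow_type_rss_offloads;
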
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 27a49bbce5e7..7894047829a8 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -90,9 +90,9 @@ static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 				 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
 			uint32_t fdircmd, uint32_t fdirhash,
@@ -163,20 +163,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
 {
 	*fdirctrl = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
 		break;
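
Note: pballoc comes from the port-level rte_eth_conf; selecting the
largest packet-buffer split for signature filters would look like this
(illustrative; the mode enum name is unchanged by this patch):

	conf.fdir_conf.pballoc = RTE_ETH_FDIR_PBALLOC_256K;
	conf.fdir_conf.mode = RTE_FDIR_MODE_SIGNATURE;
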
@@ -807,13 +807,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_128KB_HASH_MASK;
@@ -850,15 +850,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_128KB_HASH_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 27322ab9038a..bdc9d4796c02 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1259,7 +1259,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index e45c5501e6bf..944c9f23809e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -392,7 +392,7 @@ ixgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -400,7 +400,7 @@ ixgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -633,11 +633,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -657,7 +657,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
@@ -665,7 +665,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
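
Note: inline IPsec is gated on the SECURITY offload bits checked above, so
the application must request them in the port configuration before device
start (sketch):

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
	conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
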
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 295e5a39b245..9f1bd0a62ba4 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -104,15 +104,15 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -263,15 +263,15 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
 		gpie |= IXGBE_GPIE_VTMODE_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
 		gpie |= IXGBE_GPIE_VTMODE_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
 		gpie |= IXGBE_GPIE_VTMODE_16;
 		break;
@@ -674,29 +674,29 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
 		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
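
Note: the pools-to-TC pairing above is fixed by the 82599 queue layout:
16 pools x 8 TCs and 32 pools x 4 TCs both consume the 128 hardware
queues, which is why no other combination is accepted. The invariant could
even be asserted at build time (illustrative only):

	RTE_BUILD_BUG_ON(RTE_ETH_16_POOLS * RTE_ETH_8_TCS != 128);
	RTE_BUILD_BUG_ON(RTE_ETH_32_POOLS * RTE_ETH_4_TCS != 128);
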
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a51450fe5b82..aa3a406c204d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2592,26 +2592,26 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
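
Note: as on the Rx side, applications should validate requested Tx
offloads against this capability set, since unsupported bits are rejected
at configure time (sketch, assuming info and conf as in the earlier
examples):

	if ((conf.txmode.offloads & info.tx_offload_capa) !=
	    conf.txmode.offloads)
		rte_exit(EXIT_FAILURE, "unsupported Tx offload requested\n");
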
@@ -2780,7 +2780,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/*
@@ -3021,7 +3021,7 @@ ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (hw->mac.type != ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return offloads;
 }
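
Note: returning VLAN_STRIP here, in the queue-level set, is what makes it
selectable per queue on non-82598 macs; an application enables it for a
single queue through rxconf (sketch, assuming the usual setup variables):

	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;

	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
			       rte_eth_dev_socket_id(port_id), &rxconf, mb_pool);
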
@@ -3032,19 +3032,19 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t offloads;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_SCATTER |
-		   DEV_RX_OFFLOAD_RSS_HASH;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (ixgbe_is_vf(dev) == 0)
-		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	/*
 	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -3054,20 +3054,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	     hw->mac.type == ixgbe_mac_X540 ||
 	     hw->mac.type == ixgbe_mac_X550) &&
 	    !RTE_ETH_DEV_SRIOV(dev).active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -3122,7 +3122,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -3507,23 +3507,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
@@ -3605,23 +3605,23 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
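
Note: the set/get pair above is a bidirectional bit mapping; a
table-driven form would keep the two directions from drifting apart
(illustrative alternative, not proposed by this patch):

	static const struct {
		uint64_t rss;   /* RTE_ETH_RSS_* flag */
		uint32_t mrqc;  /* IXGBE_MRQC_RSS_FIELD_* bit */
	} rss_map[] = {
		{ RTE_ETH_RSS_IPV4, IXGBE_MRQC_RSS_FIELD_IPV4 },
		{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, IXGBE_MRQC_RSS_FIELD_IPV4_TCP },
		/* ... remaining pairs elided ... */
	};
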
@@ -3697,12 +3697,12 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		ixgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * RXPBSIZE
@@ -3727,7 +3727,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -3736,7 +3736,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	}
 
 	/* MRQC: enable vmdq and dcb */
-	mrqc = (num_pools == ETH_16_POOLS) ?
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
 		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
@@ -3752,7 +3752,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3776,7 +3776,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 16 or 32 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*
 	 * MPSAR - allow pools to read specific mac addresses
@@ -3858,7 +3858,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	if (hw->mac.type != ixgbe_mac_82598EB)
 		/*PF VF Transmit Enable*/
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
-			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	ixgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3874,12 +3874,12 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3889,7 +3889,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3907,12 +3907,12 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3922,7 +3922,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3949,7 +3949,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3976,7 +3976,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -4145,7 +4145,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		if (hw->mac.type != ixgbe_mac_82598EB) {
 			config_dcb_rx = DCB_RX_CONFIG;
@@ -4158,8 +4158,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			ixgbe_vmdq_dcb_configure(dev);
 		}
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -4172,7 +4172,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -4183,7 +4183,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/*get DCB TX configuration parameters from rte_eth_conf*/
@@ -4199,15 +4199,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
-			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -4257,9 +4257,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 		}
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		}
 	}
 	if (config_dcb_tx) {
 		/* Only support an equally distributed
@@ -4273,7 +4272,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
 		}
@@ -4309,7 +4308,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
@@ -4323,7 +4322,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = ixgbe_dcb_pfc_enabled;
 		}
 		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -4344,12 +4343,12 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
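
Note: the mq_mode check above means DCB must be requested explicitly in
rte_eth_conf before rte_eth_dev_configure(); a minimal 4-TC setup from the
application side (sketch):

	struct rte_eth_conf conf = {0};

	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_DCB;
	conf.txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
	conf.rx_adv_conf.dcb_rx_conf.nb_tcs = RTE_ETH_4_TCS;
	conf.tx_adv_conf.dcb_tx_conf.nb_tcs = RTE_ETH_4_TCS;
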
@@ -4405,7 +4404,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 64 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
 
 	/*
@@ -4526,11 +4525,11 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
 		break;
 
@@ -4551,17 +4550,17 @@ ixgbe_config_vf_default(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQEN);
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT4TCEN);
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT8TCEN);
 		break;
@@ -4588,21 +4587,21 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			ixgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			ixgbe_rss_disable(dev);
@@ -4613,18 +4612,18 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4658,7 +4657,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			ixgbe_vmdq_tx_hw_configure(hw);
 		else {
 			mtqc = IXGBE_MTQC_64Q_1PB;
@@ -4671,13 +4670,13 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
 				IXGBE_MTQC_8TC_8TQ;
 			break;
@@ -4885,7 +4884,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		rxq->rx_using_sse = rx_using_sse;
 #ifdef RTE_LIB_SECURITY
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 #endif
 	}
 }
@@ -4913,10 +4912,10 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4924,8 +4923,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4939,7 +4938,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 	else
 		rfctl |= IXGBE_RFCTL_RSC_DIS;
@@ -4948,7 +4947,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
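
Note: the KEEP_CRC/TCP_LRO exclusion enforced above surfaces to the
application as -EINVAL at device start, so a port requesting LRO must
leave CRC stripping enabled (sketch of a valid rxmode fragment):

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
	/* must not also set RTE_ETH_RX_OFFLOAD_KEEP_CRC on ixgbe */
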
@@ -5070,7 +5069,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -5107,7 +5106,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5116,7 +5115,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -5158,11 +5157,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -5177,7 +5176,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -5187,7 +5186,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5393,9 +5392,9 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 #ifdef RTE_LIB_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SECURITY) ||
+			RTE_ETH_RX_OFFLOAD_SECURITY) ||
 		(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY)) {
+			RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = ixgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -5681,7 +5680,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5730,7 +5729,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
@@ -5738,8 +5737,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index a1764f2b08af..668a5b9814f6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -133,7 +133,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_udp_csum_zero_err;
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -227,7 +227,7 @@ struct ixgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 005e60668a8b..cd34d4098785 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -277,7 +277,7 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index ae03ea6e9db3..ac8976062fa7 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -119,14 +119,14 @@ ixgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -375,10 +375,10 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -392,7 +392,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index 9fa75984fb31..bd528ff346c7 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -58,20 +58,20 @@ ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	/**< Maximum number of MAC addresses. */
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |	DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 	/**< Device RX offload capabilities. */
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	/**< Device TX offload capabilities. */
 
 	dev_info->speed_capa =
 		representor->pf_ethdev->data->dev_link.link_speed;
-	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	/**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 
 	dev_info->switch_info.name =
 		representor->pf_ethdev->device->name;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index cf089cd9aee5..9729f8575f53 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -303,10 +303,10 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
 	 */
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_16_POOLS;
+				  RTE_ETH_16_POOLS;
 	else
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_64_POOLS;
+				  RTE_ETH_64_POOLS;
 
 	for (q = 0; q < queues_per_pool; q++)
 		(*dev->dev_ops->vlan_strip_queue_set)(dev,
@@ -736,14 +736,14 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
 	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 	eth_conf = &dev->data->dev_conf;
 
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 90fc8160b1f8..eef6f6661c74 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -285,8 +285,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
 * @param rx_mask
 *    The RX mode mask, which is one or more of accepting Untagged Packets,
 *    packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-*    ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-*    ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+*    RTE_ETH_VMDQ_ACCEPT_UNTAG, RTE_ETH_VMDQ_ACCEPT_HASH_UC,
+*    RTE_ETH_VMDQ_ACCEPT_BROADCAST and RTE_ETH_VMDQ_ACCEPT_MULTICAST will be used
 *    in rx_mode.
 * @param on
 *    1 - Enable a VF RX mode.
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index cb9f7c8e8200..c428caf44189 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,10 @@ struct pmd_internals {
 };
 
 static const struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 static int is_kni_initialized;
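
Note: virtual PMDs such as kni report a fixed link; on the read side an
application sees the same renamed constants (sketch):

	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP)
		printf("speed %u Mbps\n", link.link_speed);
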
 
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 0fc3f0ab66a9..90ffe31b9fda 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -384,15 +384,15 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		break;
 	/* CN23xx 25G cards */
 	case PCI_SUBSYS_DEV_ID_CN2350_225:
 	case PCI_SUBSYS_DEV_ID_CN2360_225:
-		devinfo->speed_capa = ETH_LINK_SPEED_25G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		break;
 	default:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		lio_dev_err(lio_dev,
 			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
 		return -EINVAL;
@@ -406,27 +406,27 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	devinfo->max_mac_addrs = 1;
 
-	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_RX_OFFLOAD_UDP_CKSUM		|
-				    DEV_RX_OFFLOAD_TCP_CKSUM		|
-				    DEV_RX_OFFLOAD_VLAN_STRIP		|
-				    DEV_RX_OFFLOAD_RSS_HASH);
-	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_TX_OFFLOAD_UDP_CKSUM		|
-				    DEV_TX_OFFLOAD_TCP_CKSUM		|
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+	devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH);
+	devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
 
 	devinfo->rx_desc_lim = lio_rx_desc_lim;
 	devinfo->tx_desc_lim = lio_tx_desc_lim;
 
 	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
 	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
-	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
-					   ETH_RSS_NONFRAG_IPV4_TCP	|
-					   ETH_RSS_IPV6			|
-					   ETH_RSS_NONFRAG_IPV6_TCP	|
-					   ETH_RSS_IPV6_EX		|
-					   ETH_RSS_IPV6_TCP_EX);
+	devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4			|
+					   RTE_ETH_RSS_NONFRAG_IPV4_TCP	|
+					   RTE_ETH_RSS_IPV6			|
+					   RTE_ETH_RSS_NONFRAG_IPV6_TCP	|
+					   RTE_ETH_RSS_IPV6_EX		|
+					   RTE_ETH_RSS_IPV6_TCP_EX);
 	return 0;
 }
 
@@ -519,10 +519,10 @@ lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
 	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
 
-	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				rss_state->itable[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -562,12 +562,12 @@ lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
-		       RTE_RETA_GROUP_SIZE);
+		       &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE],
+		       RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -595,17 +595,17 @@ lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
 
 	if (rss_state->ip)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (rss_state->tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (rss_state->ipv6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (rss_state->ipv6_tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (rss_state->ipv6_ex)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (rss_state->ipv6_tcp_ex_hash)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -673,42 +673,42 @@ lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (rss_state->hash_disable)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 			hashinfo |= LIO_RSS_HASH_IPV4;
 			rss_state->ip = 1;
 		} else {
 			rss_state->ip = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
 			rss_state->tcp_hash = 1;
 		} else {
 			rss_state->tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {
 			hashinfo |= LIO_RSS_HASH_IPV6;
 			rss_state->ipv6 = 1;
 		} else {
 			rss_state->ipv6 = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
 			rss_state->ipv6_tcp_hash = 1;
 		} else {
 			rss_state->ipv6_tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {
 			hashinfo |= LIO_RSS_HASH_IPV6_EX;
 			rss_state->ipv6_ex = 1;
 		} else {
 			rss_state->ipv6_ex = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
 			rss_state->ipv6_tcp_ex_hash = 1;
 		} else {
@@ -757,7 +757,7 @@ lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -814,7 +814,7 @@ lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -912,10 +912,10 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	/* Initialize */
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	/* Return what we found */
 	if (lio_dev->linfo.link.s.link_up == 0) {
@@ -923,18 +923,18 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		return rte_eth_linkstatus_set(eth_dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP; /* Interface is up */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP; /* Interface is up */
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	switch (lio_dev->linfo.link.s.speed) {
 	case LIO_LINK_SPEED_10000:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case LIO_LINK_SPEED_25000:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	}
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -1086,8 +1086,8 @@ lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
 
 		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
 				  i % eth_dev->data->nb_rx_queues : 0);
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[conf_idx].reta[reta_idx] = q_idx;
 		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
 	}
@@ -1103,10 +1103,10 @@ lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rss_conf rss_conf;
 
 	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		lio_dev_rss_configure(eth_dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 	/* if mq_mode is none, disable rss mode. */
 	default:
 		memset(&rss_conf, 0, sizeof(rss_conf));
@@ -1484,7 +1484,7 @@ lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 1;
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -1505,11 +1505,11 @@ lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 0;
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
 		lio_dev->linfo.link.s.link_up = 1;
-		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		lio_dev_err(lio_dev, "Unable to set Link Down\n");
 		return -1;
 	}
@@ -1721,9 +1721,9 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Inform firmware about change in number of queues to use.
 	 * Disable IO queues and reset registers for re-configuration.
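The conversion in this file, as in the rest of the series, is a strict
one-to-one rename. For readers mapping old call sites onto the new
namespace, an illustrative alias table (hypothetical; the authoritative
definitions live in lib/ethdev/rte_ethdev.h):

    /* Illustrative mapping only -- not the actual compat list. */
    #define ETH_MQ_RX_RSS            RTE_ETH_MQ_RX_RSS
    #define ETH_LINK_SPEED_10G       RTE_ETH_LINK_SPEED_10G
    #define DEV_RX_OFFLOAD_RSS_HASH  RTE_ETH_RX_OFFLOAD_RSS_HASH
    #define RTE_RETA_GROUP_SIZE      RTE_ETH_RETA_GROUP_SIZE
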
diff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c
index 364e818d65c1..8533e39f6957 100644
--- a/drivers/net/memif/memif_socket.c
+++ b/drivers/net/memif/memif_socket.c
@@ -525,7 +525,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
 
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 980150293e86..9deb7a5f1360 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -55,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION		"memif_mp_send_region"
@@ -199,7 +199,7 @@ memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *de
 	dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1219,7 +1219,7 @@ memif_connect(struct rte_eth_dev *dev)
 
 		pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 		pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	}
 	MIF_LOG(INFO, "Connected.");
 	return 0;
@@ -1381,10 +1381,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		proc_private = dev->process_private;
-		if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+		if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
 				proc_private->regions_num == 0) {
 			memif_mp_request_regions(dev);
-		} else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+		} else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
 				proc_private->regions_num > 0) {
 			memif_free_regions(dev);
 		}
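The memif hunks only rename the link fields; consuming them from an
application is unchanged. A usage sketch, assuming an already started
port (print_link is illustrative, not part of the patch):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Usage sketch: query the cached link state without blocking. */
    static void
    print_link(uint16_t port_id)
    {
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
            return;
        if (link.link_status == RTE_ETH_LINK_UP)
            printf("port %u: up, %u Mbps, %s duplex\n", port_id,
                   link.link_speed,
                   link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                   "full" : "half");
        else
            printf("port %u: down\n", port_id);
    }
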
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 783ff94dce8d..d606ec8ca76d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -657,11 +657,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->if_index = priv->if_index;
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
 	info->speed_capa =
-			ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_20G |
-			ETH_LINK_SPEED_40G |
-			ETH_LINK_SPEED_56G;
+			RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_20G |
+			RTE_ETH_LINK_SPEED_40G |
+			RTE_ETH_LINK_SPEED_56G;
 	info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
 
 	return 0;
@@ -821,13 +821,13 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		dev_link.link_speed = link_speed;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	dev->data->dev_link = dev_link;
 	return 0;
 }
@@ -863,13 +863,13 @@ mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	ret = 0;
 out:
 	MLX4_ASSERT(ret >= 0);
@@ -899,13 +899,13 @@ mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
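The flow-control hunks above encode the same rx/tx pause mapping in
both directions. A condensed sketch of the get-side mapping (the
helper name is illustrative, not part of the driver):

    #include <rte_ethdev.h>

    /* Illustrative helper: derive RTE_ETH_FC_* from pause flags,
     * mirroring mlx4_flow_ctrl_get() above.
     */
    static enum rte_eth_fc_mode
    fc_mode_from_pause(int rx_pause, int tx_pause)
    {
        if (rx_pause && tx_pause)
            return RTE_ETH_FC_FULL;
        if (rx_pause)
            return RTE_ETH_FC_RX_PAUSE;
        if (tx_pause)
            return RTE_ETH_FC_TX_PAUSE;
        return RTE_ETH_FC_NONE;
    }
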
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 71ea91b3fb82..2e1b6c87e983 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -109,21 +109,21 @@ mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
 	};
 	static const uint64_t dpdk[] = {
 		[INNER] = 0,
-		[IPV4] = ETH_RSS_IPV4,
-		[IPV4_1] = ETH_RSS_FRAG_IPV4,
-		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IPV6] = ETH_RSS_IPV6,
-		[IPV6_1] = ETH_RSS_FRAG_IPV6,
-		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IPV6_3] = ETH_RSS_IPV6_EX,
+		[IPV4] = RTE_ETH_RSS_IPV4,
+		[IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
+		[IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IPV6] = RTE_ETH_RSS_IPV6,
+		[IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
+		[IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IPV6_3] = RTE_ETH_RSS_IPV6_EX,
 		[TCP] = 0,
 		[UDP] = 0,
-		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
-		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
-		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
-		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
-		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
-		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+		[IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+		[IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+		[IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+		[IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
+		[IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+		[IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
 	};
 	static const uint64_t verbs[RTE_DIM(dpdk)] = {
 		[INNER] = IBV_RX_HASH_INNER,
@@ -1283,7 +1283,7 @@ mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1358,7 +1358,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 	struct rte_ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
 		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
+		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 		!ETH_DEV(priv)->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index d56009c41845..2aab0f60a7b5 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -118,7 +118,7 @@ mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
 static void
 mlx4_link_status_alarm(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	MLX4_ASSERT(priv->intr_alarm == 1);
@@ -183,7 +183,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
 	};
 	uint32_t caught[RTE_DIM(type)] = { 0 };
 	struct ibv_async_event event;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	unsigned int i;
 
@@ -280,7 +280,7 @@ mlx4_intr_uninstall(struct mlx4_priv *priv)
 int
 mlx4_intr_install(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	int rc;
 
@@ -386,7 +386,7 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx4_rxq_intr_enable(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
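The rte_intr_conf -> rte_eth_intr_conf rename is visible to
applications through struct rte_eth_conf. A minimal sketch requesting
the two interrupt types this file checks (lsc and rxq), assuming the
port supports them:

    #include <rte_ethdev.h>

    /* Usage sketch: enable link-status-change and per-Rx-queue
     * interrupts at configure time.
     */
    static const struct rte_eth_conf conf = {
        .intr_conf = {
            .lsc = 1,   /* link status change interrupt */
            .rxq = 1,   /* per-Rx-queue interrupt */
        },
    };
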
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index ee2d2b75e59a..781ee256df71 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -682,12 +682,12 @@ mlx4_rxq_detach(struct rxq *rxq)
 uint64_t
 mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
-			    DEV_RX_OFFLOAD_KEEP_CRC |
-			    DEV_RX_OFFLOAD_RSS_HASH;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+			    RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (priv->hw_csum)
-		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return offloads;
 }
 
@@ -703,7 +703,7 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 uint64_t
 mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	(void)priv;
 	return offloads;
@@ -785,7 +785,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
 	crc_present = 0;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (priv->hw_fcs_strip) {
 			crc_present = 1;
 		} else {
@@ -816,9 +816,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -832,7 +832,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 	if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
 		uint32_t sges_n;
 
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 7d8c4f2a2223..0db2e55befd3 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -273,20 +273,20 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 uint64_t
 mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+	uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (priv->hw_csum) {
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	}
 	if (priv->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (priv->hw_csum_l2tun) {
-		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (priv->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	}
 	return offloads;
 }
@@ -394,12 +394,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					   DEV_TX_OFFLOAD_UDP_CKSUM |
-					   DEV_TX_OFFLOAD_TCP_CKSUM)),
+			(offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
 			      (offloads &
-			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+			       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index f34133e2c641..79e27fe2d668 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -439,24 +439,24 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	else
 		dev_link.link_speed = link_speed;
 	priv->link_speed_capa = 0;
 	if (edata.supported & (SUPPORTED_1000baseT_Full |
 			       SUPPORTED_1000baseKX_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (edata.supported & SUPPORTED_10000baseKR_Full)
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
 			       SUPPORTED_40000baseCR4_Full |
 			       SUPPORTED_40000baseSR4_Full |
 			       SUPPORTED_40000baseLR4_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -545,45 +545,45 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		return ret;
 	}
 	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
-				ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+				RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
 	sc = ecmd->link_mode_masks[0] |
 		((uint64_t)ecmd->link_mode_masks[1] << 32);
 	priv->link_speed_capa = 0;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	sc = ecmd->link_mode_masks[2] |
 		((uint64_t)ecmd->link_mode_masks[3] << 32);
@@ -591,11 +591,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		  MLX5_BITSHIFT
 		       (ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -677,13 +677,13 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -709,14 +709,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
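The gset/gs link hunks accumulate link_speed_capa one
RTE_ETH_LINK_SPEED_* bit at a time. A small application-side sketch
decoding a few of those bits (illustrative only):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Illustrative decode of a few speed capability bits. */
    static void
    print_speed_capa(uint32_t speed_capa)
    {
        if (speed_capa & RTE_ETH_LINK_SPEED_100G)
            printf("100G ");
        if (speed_capa & RTE_ETH_LINK_SPEED_40G)
            printf("40G ");
        if (speed_capa & RTE_ETH_LINK_SPEED_10G)
            printf("10G ");
        putchar('\n');
    }
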
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 111a7597317a..23d9e0a476ac 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1310,8 +1310,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
@@ -1594,7 +1594,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/*
 	 * If HW has bug working with tunnel packet decapsulation and
 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
 	 */
 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
 		config->hw_fcs_strip = 0;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 7263d354b180..3a9b716e438c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1704,10 +1704,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
 			 struct rte_eth_udp_tunnel *udp_tunnel)
 {
 	MLX5_ASSERT(udp_tunnel != NULL);
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
 	    udp_tunnel->udp_port == 4789)
 		return 0;
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
 	    udp_tunnel->udp_port == 4790)
 		return 0;
 	return -ENOTSUP;
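mlx5_udp_tunnel_port_add() above accepts only the default VXLAN (4789)
and VXLAN-GPE (4790) ports. From the application side the request
would look like this (usage sketch):

    #include <rte_ethdev.h>

    /* Usage sketch: register the IANA VXLAN port with the PMD. */
    static int
    add_vxlan_port(uint16_t port_id)
    {
        struct rte_eth_udp_tunnel tunnel = {
            .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
            .udp_port = 4789,   /* the only VXLAN port mlx5 accepts */
        };

        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
    }
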
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 42cacd0bbe3b..52f03ada2ced 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1233,7 +1233,7 @@ TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
 struct mlx5_flow_rss_desc {
 	uint32_t level;
 	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index fe86bb40d351..12ddf4c7ff28 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -90,11 +90,11 @@
 #define MLX5_VPMD_DESCS_PER_LOOP      4
 
 /* Mask of RSS on source only or destination only. */
-#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
-			       ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+			       RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
 			    MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
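With MLX5_RSS_HF_MASK defined as the complement of the supported set,
validating a requested rss_hf reduces to a single AND. A sketch,
assuming mlx5_defs.h is in scope:

    #include <errno.h>

    /* Sketch: any requested hash type outside RTE_ETH_RSS_IP/UDP/TCP
     * and the SRC/DST-only modifiers fails fast.
     */
    static int
    validate_rss_hf(uint64_t rss_hf)
    {
        return (rss_hf & MLX5_RSS_HF_MASK) ? -EINVAL : 0;
    }
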
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 82e2284d9866..f2b78c3cc69e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -91,7 +91,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	if ((dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
+			RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
 			rte_mbuf_dyn_tx_timestamp_register(NULL, NULL) != 0) {
 		DRV_LOG(ERR, "port %u cannot register Tx timestamp field/flag",
 			dev->data->port_id);
@@ -225,8 +225,8 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->default_txportconf.ring_size = 256;
 	info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
 	info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
-	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
-		(priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
+	if ((priv->link_speed_capa & RTE_ETH_LINK_SPEED_200G) |
+		(priv->link_speed_capa & RTE_ETH_LINK_SPEED_100G)) {
 		info->default_rxportconf.nb_queues = 16;
 		info->default_txportconf.nb_queues = 16;
 		if (dev->data->nb_rx_queues > 2 ||
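The first hunk in this file registers the dynamic mbuf timestamp field
only when the application asked for Tx scheduling. A usage sketch of
the configuration that triggers that path:

    #include <rte_ethdev.h>

    /* Usage sketch: request send-on-timestamp at configure time so
     * mlx5_dev_configure() registers the dynamic timestamp field.
     */
    static const struct rte_eth_conf conf = {
        .txmode = {
            .offloads = RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP,
        },
    };
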
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 002449e993e7..d645fd48647e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
 	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
 	 */
 	uint64_t node_flags;
 	/**<
@@ -298,7 +298,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -560,8 +560,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -569,11 +569,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -584,8 +584,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_GRE,
 			 MLX5_EXPANSION_NVGRE),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -593,11 +593,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_VXLAN] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -659,32 +659,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
 						  MLX5_EXPANSION_IPV4_TCP),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_IPV4_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
 						  MLX5_EXPANSION_IPV6_TCP,
 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_IPV6_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
@@ -1100,7 +1100,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1653,14 +1653,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
-	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-	    !(rss->types & ETH_RSS_IP))
+	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & RTE_ETH_RSS_IP))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L3 partial RSS requested but L3 RSS"
 					  " type not specified");
-	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
@@ -6427,8 +6427,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 * mlx5_flow_hashfields_adjust() in advance.
 		 */
 		rss_desc->level = rss->level;
-		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	}
 	flow->dev_handles = 0;
 	if (rss && rss->types) {
@@ -7126,7 +7126,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	if (!priv->reta_idx_n || !priv->rxqs_n) {
 		return 0;
 	}
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
@@ -8794,7 +8794,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 				(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 			ctx->action_rss.types = 0;
 		for (i = 0; i != priv->reta_idx_n; ++i)
 			ctx->queue[i] = (*priv->reta_idx)[i];
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f1a83d537d0c..4a16f30fb7a6 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -331,18 +331,18 @@ enum mlx5_feature_name {
 
 /* Valid layer type for IPV4 RSS. */
 #define MLX5_IPV4_LAYER_TYPES \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-	 ETH_RSS_NONFRAG_IPV4_OTHER)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
 
 /* IBV hash source bits  for IPV4. */
 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 
 /* Valid layer type for IPV6 RSS. */
 #define MLX5_IPV6_LAYER_TYPES \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
-	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX  | ETH_RSS_IPV6_TCP_EX | \
-	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
+	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 /* IBV hash source bits  for IPV6. */
 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5bd90bfa2818..c4a5706532a9 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10862,9 +10862,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
 			else
 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10872,9 +10872,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
 			else
 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10888,11 +10888,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		return;
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-		if (rss_types & ETH_RSS_UDP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_UDP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_UDP;
 			else
@@ -10900,11 +10900,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		}
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-		if (rss_types & ETH_RSS_TCP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_TCP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_TCP;
 			else
@@ -14444,9 +14444,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4:
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV4;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV4;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV4;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14455,9 +14455,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV6:
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV6;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV6;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV6;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14466,11 +14466,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_UDP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_UDP:
-		if (rss_types & ETH_RSS_UDP) {
+		if (rss_types & RTE_ETH_RSS_UDP) {
 			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
 			else
 				*hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14479,11 +14479,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_TCP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_TCP:
-		if (rss_types & ETH_RSS_TCP) {
+		if (rss_types & RTE_ETH_RSS_TCP) {
 			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
 			else
 				*hash_field |= MLX5_TCP_IBV_RX_HASH;
@@ -14631,8 +14631,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 	origin = &shared_rss->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+	/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+	origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 892abcb65779..f9010a674d7f 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1824,7 +1824,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_TCP,
+					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
 					 (IBV_RX_HASH_SRC_PORT_TCP |
 					  IBV_RX_HASH_DST_PORT_TCP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1837,7 +1837,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_UDP,
+					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
 					 (IBV_RX_HASH_SRC_PORT_UDP |
 					  IBV_RX_HASH_DST_PORT_UDP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index c32129cdc2b8..a4f690039e24 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -68,7 +68,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 		if (!(*priv->rxqs)[i])
 			continue;
 		(*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
-			!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+			!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
 		++idx;
 	}
 	return 0;
@@ -170,8 +170,8 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	/* Fill each entry of the table even if its bit is not set. */
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 			(*priv->reta_idx)[i];
 	}
 	return 0;
@@ -209,8 +209,8 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (((reta_conf[idx].mask >> i) & 0x1) == 0)
 			continue;
 		MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
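All the RETA hunks in the series index reta_conf[] the same way:
RTE_ETH_RETA_GROUP_SIZE (64) entries per rte_eth_rss_reta_entry64
group. An application-side sketch of the same indexing (spread_reta is
illustrative and assumes reta_size <= 512):

    #include <string.h>
    #include <rte_ethdev.h>

    /* Usage sketch: spread the indirection table round-robin over
     * nb_queues Rx queues and program it into the port.
     */
    static int
    spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
    {
        struct rte_eth_rss_reta_entry64
            reta[512 / RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta, 0, sizeof(reta));
        for (i = 0; i < reta_size; i++) {
            uint16_t grp = i / RTE_ETH_RETA_GROUP_SIZE;
            uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

            reta[grp].mask |= UINT64_C(1) << pos;
            reta[grp].reta[pos] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
    }
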
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 60673d014d02..14b9991c5fa8 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -333,22 +333,22 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
-	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_TIMESTAMP |
-			     DEV_RX_OFFLOAD_RSS_HASH);
+	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
 	if (!config->mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
-		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (config->hw_csum)
-		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-			     DEV_RX_OFFLOAD_UDP_CKSUM |
-			     DEV_RX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -362,7 +362,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 uint64_t
 mlx5_get_rx_port_offloads(void)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	return offloads;
 }
@@ -694,7 +694,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				    dev->data->dev_conf.rxmode.offloads;
 
 		/* The offloads should be checked on rte_eth_dev layer. */
-		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
 			DRV_LOG(ERR, "port %u queue index %u split "
 				     "offload not configured",
@@ -1336,7 +1336,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
-	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
 	unsigned int max_rx_pktlen = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
@@ -1439,7 +1439,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
-	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
@@ -1485,7 +1485,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			config->mprq.stride_size_n : mprq_stride_size;
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_scatter_en =
-				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
+				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pktlen,
@@ -1500,7 +1500,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pktlen;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1561,9 +1561,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* Configure Rx timestamp. */
-	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
 	tmpl->rxq.timestamp_rx_flag = 0;
 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
 			&tmpl->rxq.timestamp_offset,
@@ -1572,11 +1572,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
@@ -1606,7 +1606,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.crc_present << 2);
 	/* Save port ID. */
 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
-		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = rx_seg[0].mp;
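mlx5_get_rx_queue_offloads()/mlx5_get_rx_port_offloads() feed
dev_info, so an application should consult the reported capability set
before enabling any of the renamed flags. Usage sketch:

    #include <rte_ethdev.h>

    /* Usage sketch: fetch the advertised Rx offload set; the per-port
     * capabilities reported here already include the per-queue ones.
     */
    static uint64_t
    supported_rx_offloads(uint16_t port_id)
    {
        struct rte_eth_dev_info dev_info;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
            return 0;
        return dev_info.rx_offload_capa;
    }
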
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 93b4f517bb3e..65d91bdf67e2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -16,10 +16,10 @@
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
-	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
-	 DEV_TX_OFFLOAD_UDP_CKSUM | \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 
 /*
  * Compile time sanity check for vectorized functions.
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index df671379e46d..12aeba60348a 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -523,36 +523,36 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	unsigned int diff = 0, olx = 0, i, m;
 
 	MLX5_ASSERT(priv);
-	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
 		olx |= MLX5_TXOFF_CONFIG_MULTI;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			   DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
 		/* We should support TCP Send Offload. */
 		olx |= MLX5_TXOFF_CONFIG_TSO;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support Software Parser for Tunnels. */
 		olx |= MLX5_TXOFF_CONFIG_SWP;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support IP/TCP/UDP Checksums. */
 		olx |= MLX5_TXOFF_CONFIG_CSUM;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
 		/* We should support VLAN insertion. */
 		olx |= MLX5_TXOFF_CONFIG_VLAN;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
 	    rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
 	    rte_mbuf_dynfield_lookup
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1f92250f5edd..02bb9307ae61 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -98,42 +98,42 @@ uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_VLAN_INSERT);
+	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	struct mlx5_dev_config *config = &priv->config;
 
 	if (config->hw_csum)
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (config->tx_pp)
-		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 	if (config->swp) {
 		if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->swp & MLX5_SW_PARSING_TSO_CAP)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso) {
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
-				offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 		}
 	}
 	if (!config->mprq.enabled)
-		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -801,17 +801,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int inlen_mode; /* Minimal required Inline data. */
 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
-	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					    DEV_TX_OFFLOAD_IP_TNL_TSO |
-					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	bool vlan_inline;
 	unsigned int temp;
 
 	txq_ctrl->txq.fast_free =
-		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
-		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
 		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
@@ -870,7 +870,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	 * tx_burst routine.
 	 */
 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
-	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
 		      !config->hw_vlan_insert;
 	/*
 	 * If there are few Tx queues it is prioritized
@@ -978,19 +978,19 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 						    MLX5_MAX_TSO_HEADER);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
-	   ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
-	   ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
 	   (config->swp  & MLX5_SW_PARSING_TSO_CAP))
 		txq_ctrl->txq.tunnel_en = 1;
-	txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
-				  DEV_TX_OFFLOAD_UDP_TNL_TSO) &
+	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
 				  txq_ctrl->txq.offloads) && (config->swp &
 				  MLX5_SW_PARSING_TSO_CAP)) |
-				((DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM &
+				((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
 				 txq_ctrl->txq.offloads) && (config->swp &
 				 MLX5_SW_PARSING_CSUM_CAP));
 }
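
The RTE_ETH_TX_OFFLOAD_* bits that mlx5_get_tx_port_offloads() builds here
surface to applications through rte_eth_dev_info_get(), and requested
offloads should be validated against that mask before configuring the port.
A minimal caller sketch, assuming the port exists (the requested offload set
is illustrative):

#include <errno.h>
#include <rte_ethdev.h>

static int
configure_tx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = {0};
	uint64_t wanted = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	int ret = rte_eth_dev_info_get(port_id, &info);

	if (ret != 0)
		return ret;
	if ((info.tx_offload_capa & wanted) != wanted)
		return -ENOTSUP;	/* device cannot satisfy the request */
	conf.txmode.offloads = wanted;
	/* one Rx and one Tx queue, for brevity */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
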
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 60f97f2d2d1f..07792fc5d94f 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -142,9 +142,9 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-				       DEV_RX_OFFLOAD_VLAN_STRIP);
+				       RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (!priv->config.hw_vlan_strip) {
 			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
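
The mask dispatch above is what rte_eth_dev_set_vlan_offload() calls into.
A caller-side sketch under the new names, assuming only stripping is wanted
(the error value choice is illustrative):

#include <errno.h>
#include <rte_ethdev.h>

static int
enable_vlan_strip(uint16_t port_id)
{
	/* Bits not set in the mask are switched off by the API. */
	int ret = rte_eth_dev_set_vlan_offload(port_id,
					       RTE_ETH_VLAN_STRIP_OFFLOAD);

	if (ret != 0)
		return ret;
	/* Read back which VLAN offloads are now active. */
	ret = rte_eth_dev_get_vlan_offload(port_id);
	return (ret & RTE_ETH_VLAN_STRIP_OFFLOAD) ? 0 : -EIO;
}
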
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 31c4d3276053..9a9069da7572 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -485,8 +485,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	if (config->hw_padding) {
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
index 2a0288087357..10fe6d828ccd 100644
--- a/drivers/net/mvneta/mvneta_ethdev.c
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -114,7 +114,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 	struct mvneta_priv *priv = dev->data->dev_private;
 	struct neta_ppio_params *ppio_params;
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
 		MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		if (dev->data->nb_rx_queues > 1)
@@ -126,7 +126,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ppio_params = &priv->ppio_params;
@@ -151,10 +151,10 @@ static int
 mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 		   struct rte_eth_dev_info *info)
 {
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G;
 
 	info->max_rx_queues = MRVL_NETA_RXQ_MAX;
 	info->max_tx_queues = MRVL_NETA_TXQ_MAX;
@@ -503,28 +503,28 @@ mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 
 	neta_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
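
On the application side the renamed link macros are drop-in, since the
values are unchanged. A small sketch reading back the state that a
link_update callback like the one above reports (port_id is assumed valid
and started):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u up, %u Mbps, %s, %s\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex",
		       link.link_autoneg == RTE_ETH_LINK_AUTONEG ?
				"autoneg" : "fixed");
	else
		printf("port %u down\n", port_id);
}
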
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
index 126a9a0c11b9..ccb87d518d83 100644
--- a/drivers/net/mvneta/mvneta_ethdev.h
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -54,14 +54,14 @@
 #define MRVL_NETA_MRU_TO_MTU(mru)	((mru) - MRVL_NETA_HDRS_LEN)
 
 /** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Tx offloads capabilities */
-#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				    DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MVNETA_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS)
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 				PKT_TX_TCP_CKSUM | \
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
index 9836bb071a82..62d8aa586dae 100644
--- a/drivers/net/mvneta/mvneta_rxtx.c
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -734,7 +734,7 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	rxq->priv = priv;
 	rxq->mp = mp;
 	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
-			     DEV_RX_OFFLOAD_IPV4_CKSUM;
+			     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	rxq->size = desc;
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index a6458d2ce9b5..d0746b0d1215 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -58,15 +58,15 @@
 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
 
 /** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
-			  DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			  RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Port Tx offloads capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				  DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				  DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
-			  DEV_TX_OFFLOAD_MULTI_SEGS)
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 			      PKT_TX_TCP_CKSUM | \
@@ -442,14 +442,14 @@ mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
 
 	if (rss_conf->rss_hf == 0) {
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
-	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_2_TUPLE;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 1;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 0;
@@ -483,8 +483,8 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
-	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		return -EINVAL;
@@ -502,7 +502,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
@@ -524,7 +524,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 
 	if (dev->data->nb_rx_queues == 1 &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
 		priv->configured = 1;
@@ -623,7 +623,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		return 0;
 	}
 
@@ -644,7 +644,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -664,14 +664,14 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 	ret = pp2_ppio_disable(priv->ppio);
 	if (ret)
 		return ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -893,7 +893,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->all_multicast == 1)
 		mrvl_allmulticast_enable(dev);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = mrvl_populate_vlan_table(dev, 1);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to populate VLAN table");
@@ -929,11 +929,11 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 		priv->flow_ctrl = 0;
 	}
 
-	if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 		ret = mrvl_dev_set_link_up(dev);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to set link up");
-			dev->data->dev_link.link_status = ETH_LINK_DOWN;
+			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 			goto out;
 		}
 	}
@@ -1202,30 +1202,30 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case SPEED_10000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 	pp2_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -1709,11 +1709,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
 	struct mrvl_priv *priv = dev->data->dev_private;
 
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G |
-			   ETH_LINK_SPEED_10G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G |
+			   RTE_ETH_LINK_SPEED_10G;
 
 	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
 	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
@@ -1733,9 +1733,9 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 	info->tx_offload_capa = MRVL_TX_OFFLOADS;
 	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
 
-	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-				       ETH_RSS_NONFRAG_IPV4_TCP |
-				       ETH_RSS_NONFRAG_IPV4_UDP;
+	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	/* By default packets are dropped if no descriptors are available */
 	info->default_rxconf.rx_drop_en = 1;
@@ -1864,13 +1864,13 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
 		return -ENOTSUP;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = mrvl_populate_vlan_table(dev, 1);
 		else
 			ret = mrvl_populate_vlan_table(dev, 0);
@@ -1879,7 +1879,7 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			return ret;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
 		MRVL_LOG(ERR, "Extend VLAN not supported\n");
 		return -ENOTSUP;
 	}
@@ -2022,7 +2022,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -2182,7 +2182,7 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return ret;
 	}
 
-	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
 
 	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
 	if (ret) {
@@ -2191,10 +2191,10 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	if (en) {
-		if (fc_conf->mode == RTE_FC_NONE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+		if (fc_conf->mode == RTE_ETH_FC_NONE)
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 	}
 
 	return 0;
@@ -2240,19 +2240,19 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		rx_en = 1;
 		tx_en = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		rx_en = 0;
 		tx_en = 1;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		rx_en = 1;
 		tx_en = 0;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		rx_en = 0;
 		tx_en = 0;
 		break;
@@ -2329,11 +2329,11 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hash_type == PP2_PPIO_HASH_T_NONE)
 		rss_conf->rss_hf = 0;
 	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
-		rss_conf->rss_hf = ETH_RSS_IPV4;
+		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	return 0;
 }
@@ -3152,7 +3152,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 	eth_dev->dev_ops = &mrvl_ops;
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_eth_dev_probing_finish(eth_dev);
 	return 0;
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 9e2a40597349..9c4ae80e7e16 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -40,16 +40,16 @@
 #include "hn_nvs.h"
 #include "ndis.h"
 
-#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
-			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-			    DEV_TX_OFFLOAD_TCP_TSO    | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS | \
-			    DEV_TX_OFFLOAD_VLAN_INSERT)
+#define HN_TX_OFFLOAD_CAPS (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			    RTE_ETH_TX_OFFLOAD_TCP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_TCP_TSO    | \
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+			    RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 
-#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
-			    DEV_RX_OFFLOAD_VLAN_STRIP | \
-			    DEV_RX_OFFLOAD_RSS_HASH)
+#define HN_RX_OFFLOAD_CAPS (RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+			    RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NETVSC_ARG_LATENCY "latency"
 #define NETVSC_ARG_RXBREAK "rx_copybreak"
@@ -238,21 +238,21 @@ hn_dev_link_update(struct rte_eth_dev *dev,
 	hn_rndis_get_linkspeed(hv);
 
 	link = (struct rte_eth_link) {
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_autoneg = ETH_LINK_SPEED_FIXED,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = RTE_ETH_LINK_SPEED_FIXED,
 		.link_speed = hv->link_speed / 10000,
 	};
 
 	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (old.link_status == link.link_status)
 		return 0;
 
 	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
-		     (link.link_status == ETH_LINK_UP) ? "up" : "down");
+		     (link.link_status == RTE_ETH_LINK_UP) ? "up" : "down");
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -263,14 +263,14 @@ static int hn_dev_info_get(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	int rc;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = HN_MAX_XFER_LEN;
 	dev_info->max_mac_addrs  = 1;
 
 	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
 	dev_info->flow_type_rss_offloads = hv->rss_offloads;
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 
 	dev_info->max_rx_queues = hv->max_queues;
 	dev_info->max_tx_queues = hv->max_queues;
@@ -306,8 +306,8 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -346,8 +346,8 @@ static int hn_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -362,17 +362,17 @@ static void hn_rss_hash_init(struct hn_data *hv,
 	/* Convert from DPDK RSS hash flags to NDIS hash flags */
 	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
 
-	if (rss_conf->rss_hf & ETH_RSS_IPV4)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 		hv->rss_hash |= NDIS_HASH_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 		hv->rss_hash |=  NDIS_HASH_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX)
 		hv->rss_hash |=  NDIS_HASH_IPV6_EX;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
 
 	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
@@ -427,22 +427,22 @@ static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	if (hv->rss_hash & NDIS_HASH_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_IPV4;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_IPV6;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_EX;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	return 0;
 }
@@ -686,8 +686,8 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
 	if (unsupported) {
@@ -705,7 +705,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	hv->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	err = hn_rndis_conf_offload(hv, txmode->offloads,
 				    rxmode->offloads);
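
The mq_mode/offload interplay handled in hn_dev_configure() above
corresponds to the following application-side configuration. A sketch
assuming two Rx/Tx queues; the hash selection is illustrative:

#include <stddef.h>
#include <rte_ethdev.h>

static int
configure_rss(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,	/* default key */
				.rss_hf = RTE_ETH_RSS_IPV4 |
					  RTE_ETH_RSS_NONFRAG_IPV4_TCP,
			},
		},
	};

	/* PMDs may additionally set RTE_ETH_RX_OFFLOAD_RSS_HASH here. */
	return rte_eth_dev_configure(port_id, 2, 2, &conf);
}
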
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index 62ba39636cd8..1b63b27e0c3e 100644
--- a/drivers/net/netvsc/hn_rndis.c
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -710,15 +710,15 @@ hn_rndis_query_rsscaps(struct hn_data *hv,
 
 	hv->rss_offloads = 0;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
-		hv->rss_offloads |= ETH_RSS_IPV4
-			| ETH_RSS_NONFRAG_IPV4_TCP
-			| ETH_RSS_NONFRAG_IPV4_UDP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV4
+			| RTE_ETH_RSS_NONFRAG_IPV4_TCP
+			| RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
-		hv->rss_offloads |= ETH_RSS_IPV6
-			| ETH_RSS_NONFRAG_IPV6_TCP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6
+			| RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
-		hv->rss_offloads |= ETH_RSS_IPV6_EX
-			| ETH_RSS_IPV6_TCP_EX;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6_EX
+			| RTE_ETH_RSS_IPV6_TCP_EX;
 
 	/* Commit! */
 	*rxr_cnt0 = rxr_cnt;
@@ -800,7 +800,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -812,7 +812,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
 		    == NDIS_RXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
@@ -826,7 +826,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
 			params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -839,7 +839,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
 			params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
@@ -851,21 +851,21 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
 		    == NDIS_TXCSUM_CAP_IP4)
 			params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
 			goto unsupported;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
 			params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
 			params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
 		else
@@ -907,41 +907,41 @@ int hn_rndis_get_offload(struct hn_data *hv,
 		return error;
 	}
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
 	    == HN_NDIS_TXCSUM_CAP_IP4)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
 	    == HN_NDIS_TXCSUM_CAP_TCP4 &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
 	    == HN_NDIS_TXCSUM_CAP_TCP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 
 	if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
 	    (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
 	    == HN_NDIS_LSOV2_CAP_IP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				    DEV_RX_OFFLOAD_RSS_HASH;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 
 	return 0;
 }
diff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c
index 99d93ebf4667..3c39937816a4 100644
--- a/drivers/net/nfb/nfb_ethdev.c
+++ b/drivers/net/nfb/nfb_ethdev.c
@@ -200,7 +200,7 @@ nfb_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -268,26 +268,26 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 
 	status.speed = MAC_SPEED_UNKNOWN;
 
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_status = ETH_LINK_DOWN;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_SPEED_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_SPEED_FIXED;
 
 	if (internals->rxmac[0] != NULL) {
 		nc_rxmac_read_status(internals->rxmac[0], &status);
 
 		switch (status.speed) {
 		case MAC_SPEED_10G:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case MAC_SPEED_40G:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case MAC_SPEED_100G:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -296,7 +296,7 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 		nc_rxmac_read_status(internals->rxmac[i], &status);
 
 		if (status.enabled && status.link_up) {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			break;
 		}
 	}
diff --git a/drivers/net/nfb/nfb_rx.c b/drivers/net/nfb/nfb_rx.c
index 3ebb332ae46c..f76e2ba64621 100644
--- a/drivers/net/nfb/nfb_rx.c
+++ b/drivers/net/nfb/nfb_rx.c
@@ -42,7 +42,7 @@ nfb_check_timestamp(struct rte_devargs *devargs)
 	}
 	/* Timestamps are enabled when there is
 	 * key-value pair: enable_timestamp=1
-	 * TODO: timestamp should be enabled with DEV_RX_OFFLOAD_TIMESTAMP
+	 * TODO: timestamp should be enabled with RTE_ETH_RX_OFFLOAD_TIMESTAMP
 	 */
 	if (rte_kvargs_process(kvlist, TIMESTAMP_ARG,
 		timestamp_check_handler, NULL) < 0) {
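
Once that TODO is addressed, the application side would look roughly like
the Rx counterpart of the Tx timestamp registration. A sketch, assuming the
PMD advertises the offload (names are illustrative, not part of this patch):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf_dyn.h>

static int hwts_off;		/* dynamic field offset for Rx timestamps */
static uint64_t hwts_flag;	/* dynamic flag set on stamped mbufs */

static int
enable_rx_timestamp(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;
	int ret = rte_eth_dev_info_get(port_id, &info);

	if (ret != 0)
		return ret;
	if (!(info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		return -ENOTSUP;
	ret = rte_mbuf_dyn_rx_timestamp_register(&hwts_off, &hwts_flag);
	if (ret != 0)
		return ret;
	conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	return 0;
}
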
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 0003fd54dde5..3ea697c54462 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -160,8 +160,8 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
@@ -170,7 +170,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX mode */
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
 	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
 		PMD_INIT_LOG(INFO, "RSS not supported");
 		return -EINVAL;
@@ -359,19 +359,19 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 	}
 
 	hw->mtu = dev->data->mtu;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
 	/* L2 broadcast */
@@ -383,13 +383,13 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* TX checksum offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -397,7 +397,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	}
 
 	/* RX gather */
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
 	return ctrl;
@@ -485,14 +485,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	int ret;
 
 	static const uint32_t ls_to_ethtool[] = {
-		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
-		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
-		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
-		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
-		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
-		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
+		[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
+		[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
+		[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
+		[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
+		[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
 	};
 
 	PMD_DRV_LOG(DEBUG, "Link update");
@@ -504,15 +504,15 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 			 NFP_NET_CFG_STS_LINK_RATE_MASK;
 
 	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		link.link_speed = ls_to_ethtool[nn_link_status];
 
@@ -701,26 +701,26 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_UDP_CKSUM |
-					     DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-					     DEV_TX_OFFLOAD_UDP_CKSUM |
-					     DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -757,22 +757,22 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	};
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-		dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-						   ETH_RSS_NONFRAG_IPV4_TCP |
-						   ETH_RSS_NONFRAG_IPV4_UDP |
-						   ETH_RSS_IPV6 |
-						   ETH_RSS_NONFRAG_IPV6_TCP |
-						   ETH_RSS_NONFRAG_IPV6_UDP;
+		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+						   RTE_ETH_RSS_IPV6 |
+						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 	}
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -843,7 +843,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 	if (link.link_status)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 			    dev->data->port_id, link.link_speed,
-			    link.link_duplex == ETH_LINK_FULL_DUPLEX
+			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 			    ? "full-duplex" : "half-duplex");
 	else
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -973,12 +973,12 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	new_ctrl = 0;
 
 	/* Enable vlan strip if it is not configured yet */
-	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
 
 	/* Disable vlan strip just if it is configured */
-	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
@@ -1018,8 +1018,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1099,8 +1099,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1138,22 +1138,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1223,22 +1223,22 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	/* Propagate current RSS hash functions to caller */
 	rss_conf->rss_hf = rss_hf;
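
The idx/shift arithmetic in the RETA hunks above mirrors what callers of
the RETA API do with RTE_ETH_RETA_GROUP_SIZE (64 entries per group). A
minimal sketch alternating a table of reta_size entries between queues 0
and 1, assuming reta_size does not exceed 512:

#include <errno.h>
#include <stdint.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE] = {0};
	uint16_t i;

	if (reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -EINVAL;
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= UINT64_C(1) << shift;
		reta[idx].reta[shift] = i % 2;	/* alternate queues 0 and 1 */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
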
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 1169ea77a8c7..e08e594b04fe 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -141,7 +141,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index 62cb3536e0c9..817fe64dbceb 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -103,7 +103,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3b5c6615adfa..fc76b84b5b66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -409,7 +409,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	dev->data->dev_link.link_status = link_up;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 		negotiate = true;
 
 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
@@ -418,11 +418,11 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 
 	allowed_speeds = 0;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_1G;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_100M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_10M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
 
 	if (*link_speeds & ~allowed_speeds) {
 		PMD_INIT_LOG(ERR, "Invalid link setting");
@@ -430,14 +430,14 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = hw->mac.default_speeds;
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= NGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= NGBE_LINK_SPEED_100M_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= NGBE_LINK_SPEED_10M_FULL;
 	}
 
@@ -653,8 +653,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_10M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -682,11 +682,11 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			~ETH_LINK_SPEED_AUTONEG);
+			~RTE_ETH_LINK_SPEED_AUTONEG);
 
 	hw->mac.get_link_status = true;
 
@@ -699,8 +699,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -708,27 +708,27 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 
 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case NGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 
 	case NGBE_LINK_SPEED_10M_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		lan_speed = 0;
 		break;
 
 	case NGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		lan_speed = 1;
 		break;
 
 	case NGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		lan_speed = 2;
 		break;
 	}
@@ -912,11 +912,11 @@ ngbe_dev_link_status_print(struct rte_eth_dev *dev)
 
 	rte_eth_linkstatus_get(dev, &link);
 
-	if (link.link_status == ETH_LINK_UP) {
+	if (link.link_status == RTE_ETH_LINK_UP) {
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -956,7 +956,7 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
 		ngbe_dev_link_update(dev, 0);
 
 		/* likely to up */
-		if (link.link_status != ETH_LINK_UP)
+		if (link.link_status != RTE_ETH_LINK_UP)
 			/* handle it 1 sec later, wait it being stable */
 			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
 		/* likely to down */
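
The link_speeds handling above consumes the bitmap an application passes at
configure time. A generic ethdev-level sketch forcing 1 Gb/s without
autonegotiation; individual PMDs differ in which combinations they accept:

#include <rte_ethdev.h>

static int
force_1g(uint16_t port_id)
{
	struct rte_eth_conf conf = {0};

	/* RTE_ETH_LINK_SPEED_FIXED disables autoneg for the given speed. */
	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_1G;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
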
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 25b9e5b1ce1b..ca03469d0e6d 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -61,16 +61,16 @@ struct pmd_internals {
 	rte_spinlock_t rss_lock;
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[40];                /**< 40-byte hash key. */
 };
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
@@ -189,7 +189,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return -EINVAL;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -199,7 +199,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return 0;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -391,9 +391,9 @@ eth_rss_reta_update(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
 		internal->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -416,8 +416,8 @@ eth_rss_reta_query(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
 	}
@@ -548,8 +548,8 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	internals->port_id = eth_dev->data->port_id;
 	rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
-	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
-	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+	internals->flow_type_rss_offloads =  RTE_ETH_RSS_PROTO_MASK;
+	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;
 
 	rte_memcpy(internals->rss_key, default_rss_key, 40);
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index f578123ed00b..5b8cbec67b5d 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -158,7 +158,7 @@ octeontx_link_status_print(struct rte_eth_dev *eth_dev,
 		octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
 			  (eth_dev->data->port_id),
 			  link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		octeontx_log_info("Port %d: Link Down",
@@ -171,38 +171,38 @@ octeontx_link_status_update(struct octeontx_nic *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	switch (nic->speed) {
 	case OCTEONTX_LINK_SPEED_SGMII:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_XAUI:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RXAUI:
 	case OCTEONTX_LINK_SPEED_10G_R:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case OCTEONTX_LINK_SPEED_QSGMII:
-		link->link_speed = ETH_SPEED_NUM_5G;
+		link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case OCTEONTX_LINK_SPEED_40G_R:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RESERVE1:
 	case OCTEONTX_LINK_SPEED_RESERVE2:
 	default:
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		octeontx_log_err("incorrect link speed %d", nic->speed);
 		break;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -355,20 +355,20 @@ octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= OCCTX_TX_MULTI_SEG_F;
 
 	return flags;
@@ -380,21 +380,21 @@ octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		flags |= OCCTX_RX_MULTI_SEG_F;
 		eth_dev->data->scattered_rx = 1;
 		/* If scatter mode is enabled, TX should also be in multi
 		 * seg mode, else memory leak will occur
 		 */
-		nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	return flags;
@@ -423,18 +423,18 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+		txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		octeontx_log_err("setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -530,13 +530,13 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		octeontx_log_err("Scatter mode is disabled");
 		return -EINVAL;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -571,7 +571,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
 
 	/* Setup scatter mode if needed by jumbo */
 	if (data->mtu > buffsz) {
-		nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
 		nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
 	}
@@ -843,10 +843,10 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_40G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_40G;
 
 	/* Min/Max MTU supported */
 	dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
@@ -1356,7 +1356,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	nic->ev_ports = 1;
 	nic->print_flag = -1;
 
-	data->dev_link.link_status = ETH_LINK_DOWN;
+	data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	data->dev_started = 0;
 	data->promiscuous = 0;
 	data->all_multicast = 0;
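
The hunks above are a pure rename: every ETH_SPEED_NUM_*, ETH_LINK_* and
ETH_MQ_* constant keeps its numeric value and only gains the RTE_ETH_
prefix. As a minimal sketch (not part of the patch; the helper name is
hypothetical), code that fills a link descriptor now reads:

	#include <rte_ethdev.h>

	static void
	fill_link(struct rte_eth_link *link)
	{
		/* Same values as before the rename, new spellings. */
		link->link_speed   = RTE_ETH_SPEED_NUM_10G;	/* 10000 Mbps */
		link->link_duplex  = RTE_ETH_LINK_FULL_DUPLEX;
		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
		link->link_status  = RTE_ETH_LINK_UP;
	}

The old spellings keep compiling through the compatibility macros until
those are removed.
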
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 3a02824e3948..c493fa7a03ed 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -55,23 +55,22 @@
 #define OCCTX_MAX_MTU		(OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
 
 #define OCTEONTX_RX_OFFLOADS		(				   \
-					 DEV_RX_OFFLOAD_CHECKSUM	 | \
-					 DEV_RX_OFFLOAD_SCTP_CKSUM       | \
-					 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_RX_OFFLOAD_SCATTER	         | \
-					 DEV_RX_OFFLOAD_SCATTER		 | \
-					 DEV_RX_OFFLOAD_VLAN_FILTER)
+					 RTE_ETH_RX_OFFLOAD_CHECKSUM         | \
+					 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER          | \
+					 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 
 #define OCTEONTX_TX_OFFLOADS		(				   \
-					 DEV_TX_OFFLOAD_MBUF_FAST_FREE	 | \
-					 DEV_TX_OFFLOAD_MT_LOCKFREE	 | \
-					 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_IPV4_CKSUM	 | \
-					 DEV_TX_OFFLOAD_TCP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_SCTP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_MULTI_SEGS)
+					 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+					 RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  | \
+					 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+					 RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+					 RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+					 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 static inline struct octeontx_nic *
 octeontx_pmd_priv(struct rte_eth_dev *dev)
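
The OCTEONTX_RX_OFFLOADS/OCTEONTX_TX_OFFLOADS masks above are plain
uint64_t bitmaps, so capability tests remain single bitwise ANDs. A
sketch (not part of the patch; want_scatter() is a hypothetical
application-side helper) of checking an advertised capability before
requesting the offload:

	#include <rte_ethdev.h>

	static int
	want_scatter(uint16_t port_id, struct rte_eth_conf *conf)
	{
		struct rte_eth_dev_info info;
		int ret = rte_eth_dev_info_get(port_id, &info);

		if (ret != 0)
			return ret;
		/* Request RX scatter only if the PMD advertises it. */
		if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
			conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
		return 0;
	}
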
diff --git a/drivers/net/octeontx/octeontx_ethdev_ops.c b/drivers/net/octeontx/octeontx_ethdev_ops.c
index dbe13ce3826b..6ec2b71b0672 100644
--- a/drivers/net/octeontx/octeontx_ethdev_ops.c
+++ b/drivers/net/octeontx/octeontx_ethdev_ops.c
@@ -43,20 +43,20 @@ octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			rc = octeontx_vlan_hw_filter(nic, true);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
 		} else {
 			rc = octeontx_vlan_hw_filter(nic, false);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
 		}
 	}
@@ -139,7 +139,7 @@ octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
 
 	TAILQ_INIT(&nic->vlan_info.fltr_tbl);
 
-	rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	rc = octeontx_dev_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 	if (rc)
 		octeontx_log_err("Failed to set vlan offload rc=%d", rc);
 
@@ -219,13 +219,13 @@ octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
 		return rc;
 
 	if (conf.rx_pause && conf.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (conf.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (conf.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	/* low_water & high_water values are in Bytes */
 	fc_conf->low_water = conf.low_water;
@@ -272,10 +272,10 @@ octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	conf.high_water = fc_conf->high_water;
 	conf.low_water = fc_conf->low_water;
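
The flow-control hunks map a (rx_pause, tx_pause) pair onto the four
RTE_ETH_FC_* modes and back. Spelled out as a sketch (not part of the
patch; the helper name is hypothetical):

	#include <stdbool.h>
	#include <rte_ethdev.h>

	static enum rte_eth_fc_mode
	pause_to_mode(bool rx_pause, bool tx_pause)
	{
		if (rx_pause && tx_pause)
			return RTE_ETH_FC_FULL;
		if (rx_pause)
			return RTE_ETH_FC_RX_PAUSE;
		if (tx_pause)
			return RTE_ETH_FC_TX_PAUSE;
		return RTE_ETH_FC_NONE;
	}

The reverse direction (mode to pause bits) is the pair of comparisons
used in octeontx_dev_flow_ctrl_set() above.
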
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index f491e20e95c1..060d267f5de5 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -21,7 +21,7 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 
 	if (otx2_dev_is_vf(dev) ||
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -33,10 +33,10 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 
 	/* TSO not supported for earlier chip revisions */
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
-		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	return capa;
 }
 
@@ -66,8 +66,8 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	req->npa_func = otx2_npa_pf_func_get();
 	req->sso_func = otx2_sso_pf_func_get();
 	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
@@ -373,7 +373,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 
 	aq->rq.sso_ena = 0;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		aq->rq.ipsech_ena = 1;
 
 	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
@@ -665,7 +665,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev)) {
 		rc = otx2_nix_raw_clock_tsc_conv(dev);
 		if (rc) {
@@ -692,7 +692,7 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -707,29 +707,29 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-			(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+			(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_QINQ_STRIP))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
 		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	if (!dev->ptype_disable)
@@ -768,43 +768,43 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -914,8 +914,8 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 		/* Setting up the rx[tx]_offload_flags due to change
 		 * in rx[tx]_offloads.
@@ -1848,21 +1848,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
 
 	if (otx2_dev_is_Ax(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		otx2_err("Outer IP and SCTP checksum unsupported");
 		goto fail_configure;
 	}
@@ -2235,7 +2235,7 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled in PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev))
 		otx2_nix_timesync_enable(eth_dev);
 	else
@@ -2563,8 +2563,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	rc = otx2_eth_sec_ctx_create(eth_dev);
 	if (rc)
 		goto free_mac_addrs;
-	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 
 	/* Initialize rte-flow */
 	rc = otx2_flow_init(dev);
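
otx2_nix_enable_mseg_on_jumbo() above couples RX scatter with TX
multi-segment support: a received segment chain forwarded through a TX
path that cannot walk mbuf chains would leak the tail segments, as the
in-code comments note. The invariant, as a sketch (not part of the
patch; the helper name is hypothetical):

	#include <stdint.h>
	#include <rte_ethdev.h>

	static void
	enable_jumbo_offloads(uint64_t *rx_offloads, uint64_t *tx_offloads)
	{
		*rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
		/* Keep TX able to send whatever RX may chain. */
		*tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	}
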
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 4557a0ee1945..a5282c6c1231 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -117,43 +117,43 @@
 #define CQ_TIMER_THRESH_DEFAULT	0xAULL /* ~1usec i.e (0xA * 100nsec) */
 #define CQ_TIMER_THRESH_MAX     255
 
-#define NIX_RSS_L3_L4_SRC_DST  (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
-				| ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define NIX_RSS_L3_L4_SRC_DST  (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY \
+				| RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
-#define NIX_RSS_OFFLOAD		(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
-				 ETH_RSS_TCP | ETH_RSS_SCTP | \
-				 ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
-				 NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | \
-				 ETH_RSS_C_VLAN)
+#define NIX_RSS_OFFLOAD		(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |\
+				 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | \
+				 RTE_ETH_RSS_TUNNEL | RTE_ETH_RSS_L2_PAYLOAD | \
+				 NIX_RSS_L3_L4_SRC_DST | RTE_ETH_RSS_LEVEL_MASK | \
+				 RTE_ETH_RSS_C_VLAN)
 
 #define NIX_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE	| \
-	DEV_TX_OFFLOAD_MT_LOCKFREE	| \
-	DEV_TX_OFFLOAD_VLAN_INSERT	| \
-	DEV_TX_OFFLOAD_QINQ_INSERT	| \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
-	DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_CKSUM	| \
-	DEV_TX_OFFLOAD_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_TSO		| \
-	DEV_TX_OFFLOAD_VXLAN_TNL_TSO    | \
-	DEV_TX_OFFLOAD_GENEVE_TNL_TSO   | \
-	DEV_TX_OFFLOAD_GRE_TNL_TSO	| \
-	DEV_TX_OFFLOAD_MULTI_SEGS	| \
-	DEV_TX_OFFLOAD_IPV4_CKSUM)
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      | \
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT      | \
+	RTE_ETH_TX_OFFLOAD_QINQ_INSERT      | \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       | \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO          | \
+	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    | \
+	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   | \
+	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS       | \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define NIX_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM		| \
-	DEV_RX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_RX_OFFLOAD_SCATTER		| \
-	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_RX_OFFLOAD_VLAN_STRIP	| \
-	DEV_RX_OFFLOAD_VLAN_FILTER	| \
-	DEV_RX_OFFLOAD_QINQ_STRIP	| \
-	DEV_RX_OFFLOAD_TIMESTAMP	| \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM         | \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       | \
+	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_RX_OFFLOAD_SCATTER          | \
+	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP       | \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER      | \
+	RTE_ETH_RX_OFFLOAD_QINQ_STRIP       | \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP        | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NIX_DEFAULT_RSS_CTX_GROUP  0
 #define NIX_DEFAULT_RSS_MCAM_IDX  -1
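
NIX_RSS_L3_L4_SRC_DST above groups the four *_ONLY selectors that
restrict hashing to one side of the address/port pair. A sketch of
requesting source-only L3 hashing (not part of the patch):

	#include <rte_ethdev.h>

	/* Hash on IP addresses, but only the source side. */
	static const uint64_t rss_hf_src_only =
		RTE_ETH_RSS_IP | RTE_ETH_RSS_L3_SRC_ONLY;
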
diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c b/drivers/net/octeontx2/otx2_ethdev_devargs.c
index 83f905315b38..60bf6c3f5f05 100644
--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -49,12 +49,12 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
-		val = ETH_RSS_RETA_SIZE_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
-		val = ETH_RSS_RETA_SIZE_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
-		val = ETH_RSS_RETA_SIZE_256;
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
+		val = RTE_ETH_RSS_RETA_SIZE_64;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
+		val = RTE_ETH_RSS_RETA_SIZE_128;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
+		val = RTE_ETH_RSS_RETA_SIZE_256;
 	else
 		val = NIX_RSS_RETA_SIZE;
 
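
parse_reta_size() above rounds a requested redirection-table size up to
the next RTE_ETH_RSS_RETA_SIZE_* bucket. The same bucketing as a sketch
(not part of the patch; NIX_RSS_RETA_SIZE is the driver maximum used as
the fallback):

	static uint16_t
	round_reta_size(int val)
	{
		if (val <= RTE_ETH_RSS_RETA_SIZE_64)
			return RTE_ETH_RSS_RETA_SIZE_64;
		if (val <= RTE_ETH_RSS_RETA_SIZE_128)
			return RTE_ETH_RSS_RETA_SIZE_128;
		if (val <= RTE_ETH_RSS_RETA_SIZE_256)
			return RTE_ETH_RSS_RETA_SIZE_256;
		return NIX_RSS_RETA_SIZE;	/* anything larger */
	}

so reta_size=100 on the devargs line lands on a 128-entry table.
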
diff --git a/drivers/net/octeontx2/otx2_ethdev_ops.c b/drivers/net/octeontx2/otx2_ethdev_ops.c
index 22a8af5cba45..d5caaa326a5a 100644
--- a/drivers/net/octeontx2/otx2_ethdev_ops.c
+++ b/drivers/net/octeontx2/otx2_ethdev_ops.c
@@ -26,11 +26,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER))
 		return -EINVAL;
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -568,17 +568,17 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	};
 
 	/* Auto negotiation disabled */
-	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
-		devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+		devinfo->speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G;
 
 		/* 50G and 100G to be supported for board version C0
 		 * and above.
 		 */
 		if (!otx2_dev_is_Ax(dev))
-			devinfo->speed_capa |= ETH_LINK_SPEED_50G |
-					       ETH_LINK_SPEED_100G;
+			devinfo->speed_capa |= RTE_ETH_LINK_SPEED_50G |
+					       RTE_ETH_LINK_SPEED_100G;
 	}
 
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
diff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c
index 7bd1ed6da043..4d40184de46d 100644
--- a/drivers/net/octeontx2/otx2_ethdev_sec.c
+++ b/drivers/net/octeontx2/otx2_ethdev_sec.c
@@ -869,8 +869,8 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
 			 !RTE_IS_POWER_OF_2(sa_width));
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return 0;
 
 	if (rte_security_dynfield_register() < 0)
@@ -912,8 +912,8 @@ otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
 	uint16_t port = eth_dev->data->port_id;
 	char name[RTE_MEMZONE_NAMESIZE];
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	lookup_mem_sa_tbl_clear(eth_dev);
diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c
index 6df0732189eb..1d0fe4e950d4 100644
--- a/drivers/net/octeontx2/otx2_flow.c
+++ b/drivers/net/octeontx2/otx2_flow.c
@@ -625,7 +625,7 @@ otx2_flow_create(struct rte_eth_dev *dev,
 		goto err_exit;
 	}
 
-	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rc = flow_update_sec_tt(dev, actions);
 		if (rc != 0) {
 			rte_flow_error_set(error, EIO,
diff --git a/drivers/net/octeontx2/otx2_flow_ctrl.c b/drivers/net/octeontx2/otx2_flow_ctrl.c
index 76bf48100183..071740de86a7 100644
--- a/drivers/net/octeontx2/otx2_flow_ctrl.c
+++ b/drivers/net/octeontx2/otx2_flow_ctrl.c
@@ -54,7 +54,7 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 	int rc;
 
 	if (otx2_dev_is_lbk(dev)) {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		return 0;
 	}
 
@@ -66,13 +66,13 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		goto done;
 
 	if (rsp->rx_pause && rsp->tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rsp->rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (rsp->tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 done:
 	return rc;
@@ -159,10 +159,10 @@ otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -212,11 +212,11 @@ otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (otx2_dev_is_Ax(dev) &&
 	    (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
-	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_conf.mode == RTE_ETH_FC_FULL || fc_conf.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_conf.mode =
-				(fc_conf.mode == RTE_FC_FULL ||
-				fc_conf.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_conf.mode == RTE_ETH_FC_FULL ||
+				fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
@@ -234,7 +234,7 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		return 0;
 
 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -242,10 +242,10 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
diff --git a/drivers/net/octeontx2/otx2_flow_parse.c b/drivers/net/octeontx2/otx2_flow_parse.c
index 79b92fda8a4a..91267bbb8182 100644
--- a/drivers/net/octeontx2/otx2_flow_parse.c
+++ b/drivers/net/octeontx2/otx2_flow_parse.c
@@ -852,7 +852,7 @@ parse_rss_action(struct rte_eth_dev *dev,
 					  attr, "No support of RSS in egress");
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  act, "multi-queue mode is disabled");
@@ -1186,7 +1186,7 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev,
 		 *FLOW_KEY_ALG index. So, till we update the action with
 		 *flow_key_alg index, set the action to drop.
 		 */
-		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 			flow->npc_action = NIX_RX_ACTIONOP_DROP;
 		else
 			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
diff --git a/drivers/net/octeontx2/otx2_link.c b/drivers/net/octeontx2/otx2_link.c
index 81dd6243b977..8f5d0eed92b6 100644
--- a/drivers/net/octeontx2/otx2_link.c
+++ b/drivers/net/octeontx2/otx2_link.c
@@ -41,7 +41,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		otx2_info("Port %d: Link Up - speed %u Mbps - %s",
 			  (int)(eth_dev->data->port_id),
 			  (uint32_t)link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
@@ -92,7 +92,7 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 
 	eth_link.link_status = link->link_up;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	otx2_dev->speed = link->speed;
@@ -111,10 +111,10 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 static int
 lbk_link_update(struct rte_eth_link *link)
 {
-	link->link_status = ETH_LINK_UP;
-	link->link_speed = ETH_SPEED_NUM_100G;
-	link->link_autoneg = ETH_LINK_FIXED;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = RTE_ETH_LINK_UP;
+	link->link_speed = RTE_ETH_SPEED_NUM_100G;
+	link->link_autoneg = RTE_ETH_LINK_FIXED;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	return 0;
 }
 
@@ -131,7 +131,7 @@ cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
 
 	link->link_status = rsp->link_info.link_up;
 	link->link_speed = rsp->link_info.speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (rsp->link_info.full_duplex)
 		link->link_duplex = rsp->link_info.full_duplex;
@@ -233,22 +233,22 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 
 	/* 50G and 100G to be supported for board version C0 and above */
 	if (!otx2_dev_is_Ax(dev)) {
-		if (link_speeds & ETH_LINK_SPEED_100G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 			link_speed = 100000;
-		if (link_speeds & ETH_LINK_SPEED_50G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 			link_speed = 50000;
 	}
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed = 40000;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed = 25000;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed = 20000;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed = 10000;
-	if (link_speeds & ETH_LINK_SPEED_5G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 		link_speed = 5000;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed = 1000;
 
 	return link_speed;
@@ -257,11 +257,11 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 static inline uint8_t
 nix_parse_eth_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-			(link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+			(link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 int
@@ -279,7 +279,7 @@ otx2_apply_link_speed(struct rte_eth_dev *eth_dev)
 	cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
 	if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
 		cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
-		cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+		cfg.an = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		return cgx_change_mode(dev, &cfg);
 	}
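
nix_parse_link_speeds() above treats link_speeds as the usual
capability bitmap: RTE_ETH_LINK_SPEED_FIXED clear means autonegotiate,
and when several speed bits are set the later, lower-speed checks
overwrite the earlier ones, so the lowest requested speed is the one
programmed. A worked sketch (not part of the patch; the helper name is
hypothetical):

	#include <stdbool.h>
	#include <rte_ethdev.h>

	/* Fixed 10G: FIXED bit plus exactly one speed bit. */
	static const uint32_t speeds =
		RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;

	static bool
	wants_autoneg(uint32_t link_speeds)
	{
		return (link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
	}
	/* wants_autoneg(speeds) == false; the parser selects 10000 Mbps. */
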
diff --git a/drivers/net/octeontx2/otx2_mcast.c b/drivers/net/octeontx2/otx2_mcast.c
index f84aa1bf570c..b9c63ad3bc21 100644
--- a/drivers/net/octeontx2/otx2_mcast.c
+++ b/drivers/net/octeontx2/otx2_mcast.c
@@ -100,7 +100,7 @@ nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
 
 		action = NIX_RX_ACTIONOP_UCAST;
 
-		if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+		if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 			action = NIX_RX_ACTIONOP_RSS;
 			action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 		}
diff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c
index 91e5c0f6bd11..abb213058792 100644
--- a/drivers/net/octeontx2/otx2_ptp.c
+++ b/drivers/net/octeontx2/otx2_ptp.c
@@ -250,7 +250,7 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	/* System time should be already on by default */
 	nix_start_timecounters(eth_dev);
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
@@ -287,7 +287,7 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 	if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
 		return -EINVAL;
 
-	dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
 
diff --git a/drivers/net/octeontx2/otx2_rss.c b/drivers/net/octeontx2/otx2_rss.c
index 7dbe5f69ae65..68cef1caa394 100644
--- a/drivers/net/octeontx2/otx2_rss.c
+++ b/drivers/net/octeontx2/otx2_rss.c
@@ -85,8 +85,8 @@ otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				rss->ind_tbl[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -118,8 +118,8 @@ otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = rss->ind_tbl[j];
 	}
@@ -178,23 +178,23 @@ rss_get_key(struct otx2_eth_dev *dev, uint8_t *key)
 }
 
 #define RSS_IPV4_ENABLE ( \
-			  ETH_RSS_IPV4 | \
-			  ETH_RSS_FRAG_IPV4 | \
-			  ETH_RSS_NONFRAG_IPV4_UDP | \
-			  ETH_RSS_NONFRAG_IPV4_TCP | \
-			  ETH_RSS_NONFRAG_IPV4_SCTP)
+			  RTE_ETH_RSS_IPV4 | \
+			  RTE_ETH_RSS_FRAG_IPV4 | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE ( \
-			  ETH_RSS_IPV6 | \
-			  ETH_RSS_FRAG_IPV6 | \
-			  ETH_RSS_NONFRAG_IPV6_UDP | \
-			  ETH_RSS_NONFRAG_IPV6_TCP | \
-			  ETH_RSS_NONFRAG_IPV6_SCTP)
+			  RTE_ETH_RSS_IPV6 | \
+			  RTE_ETH_RSS_FRAG_IPV6 | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE ( \
-			     ETH_RSS_IPV6_EX | \
-			     ETH_RSS_IPV6_TCP_EX | \
-			     ETH_RSS_IPV6_UDP_EX)
+			     RTE_ETH_RSS_IPV6_EX | \
+			     RTE_ETH_RSS_IPV6_TCP_EX | \
+			     RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS   3
 
@@ -233,24 +233,24 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->rss_info.nix_rss = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -259,34 +259,34 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -343,7 +343,7 @@ otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 		otx2_nix_rss_set_key(dev, rss_conf->rss_key,
 				     (uint32_t)rss_conf->rss_key_len);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
@@ -390,7 +390,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	int rc;
 
 	/* Skip further configuration if selected mode is not RSS */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS || !qcnt)
 		return 0;
 
 	/* Update default RSS key and cfg */
@@ -408,7 +408,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	}
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
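
otx2_nix_rss_hash_update() above reads the hash level encoded in
rss_hf: RTE_ETH_RSS_LEVEL() yields 0 (PMD default), 1 (outermost) or
2 (innermost), and the driver maps the non-zero values onto its 0-based
flow-key table rows by subtracting one. A sketch (not part of the
patch; the helper name is hypothetical):

	#include <rte_ethdev.h>

	static uint32_t
	rss_level_to_row(uint64_t rss_hf)
	{
		uint32_t level = RTE_ETH_RSS_LEVEL(rss_hf);	/* 0, 1 or 2 */

		return level ? level - 1 : 0;	/* flow-key table row */
	}
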
diff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c
index 0d85c898bfe7..2c18483b98fd 100644
--- a/drivers/net/octeontx2/otx2_rx.c
+++ b/drivers/net/octeontx2/otx2_rx.c
@@ -414,12 +414,12 @@ NIX_RX_FASTPATH_MODES
 	/* For PTP enabled, scalar rx function should be chosen as most of the
 	 * PTP apps are implemented to rx burst 1 pkt.
 	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		pick_rx_func(eth_dev, nix_eth_rx_burst);
 	else
 		pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 
 	/* Copy multi seg version with no offload for tear down sequence */
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index ad704d745b04..135615580bbf 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -1070,7 +1070,7 @@ NIX_TX_FASTPATH_MODES
 	else
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 
 	rte_mb();
diff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c
index f5161e17a16d..cce643b7b51d 100644
--- a/drivers/net/octeontx2/otx2_vlan.c
+++ b/drivers/net/octeontx2/otx2_vlan.c
@@ -50,7 +50,7 @@ nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
 
 	action = NIX_RX_ACTIONOP_UCAST;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		action = NIX_RX_ACTIONOP_RSS;
 		action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 	}
@@ -99,7 +99,7 @@ nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
 	 * Take offset from LA since in case of untagged packet,
 	 * lbptr is zero.
 	 */
-	if (type == ETH_VLAN_TYPE_OUTER) {
+	if (type == RTE_ETH_VLAN_TYPE_OUTER) {
 		vtag_action.act.vtag0_def = vtag_index;
 		vtag_action.act.vtag0_lid = NPC_LID_LA;
 		vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
@@ -413,7 +413,7 @@ nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
 		if (vlan->strip_on ||
 		    (vlan->qinq_on && !vlan->qinq_before_def)) {
 			if (eth_dev->data->dev_conf.rxmode.mq_mode ==
-								ETH_MQ_RX_RSS)
+								RTE_ETH_MQ_RX_RSS)
 				vlan->def_rx_mcam_ent.action |=
 							NIX_RX_ACTIONOP_RSS;
 			else
@@ -717,48 +717,48 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
 	rxmode = &eth_dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, true);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, false);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, true, 0);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, false, 0);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
 		if (!dev->vlan_info.qinq_on) {
-			offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, true);
 			if (rc)
 				goto done;
 		}
 	} else {
 		if (dev->vlan_info.qinq_on) {
-			offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, false);
 			if (rc)
 				goto done;
 		}
 	}
 
-	if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-			DEV_RX_OFFLOAD_QINQ_STRIP)) {
+	if (offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) {
 		dev->rx_offloads |= offloads;
 		dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 		otx2_eth_set_rx_function(eth_dev);
@@ -780,7 +780,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
 
 	tpid_cfg->tpid = tpid;
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
 	else
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
@@ -789,7 +789,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	if (rc)
 		return rc;
 
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		dev->vlan_info.outer_vlan_tpid = tpid;
 	else
 		dev->vlan_info.inner_vlan_tpid = tpid;
@@ -864,7 +864,7 @@ otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		vlan->outer_vlan_idx = 0;
 	}
 
-	rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+	rc = nix_vlan_handle_default_tx_entry(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					      vtag_index, on);
 	if (rc < 0) {
 		printf("Default tx entry failed with rc %d\n", rc);
@@ -986,12 +986,11 @@ otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
 	} else {
 		/* Reinstall all mcam entries now if filter offload is set */
 		if (eth_dev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			nix_vlan_reinstall_vlan_filters(eth_dev);
 	}
 
-	mask =
-	    ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	rc = otx2_nix_vlan_offload_set(eth_dev, mask);
 	if (rc) {
 		otx2_err("Failed to set vlan offload rc=%d", rc);
diff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c
index 698d22e22685..74dc36a17648 100644
--- a/drivers/net/octeontx_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeontx_ep/otx_ep_ethdev.c
@@ -33,14 +33,14 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	otx_epvf = OTX_EP_DEV(eth_dev);
 
-	devinfo->speed_capa = ETH_LINK_SPEED_10G;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
 
 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
 	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
-	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
 
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.c b/drivers/net/octeontx_ep/otx_ep_rxtx.c
index aa4dcd33cc79..9338b30672ec 100644
--- a/drivers/net/octeontx_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeontx_ep/otx_ep_rxtx.c
@@ -563,7 +563,7 @@ otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
@@ -697,7 +697,7 @@ otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)
@@ -954,7 +954,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
 	droq_pkt->l4_len = hdr_lens.l4_len;
 
 	if (droq_pkt->nb_segs > 1 &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
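
Both otx_ep datapaths above drop chained mbufs when the matching
multi-segment offload was not negotiated at configure time. The guard,
as a sketch (not part of the patch; the helper name is hypothetical):

	#include <stdbool.h>
	#include <rte_mbuf.h>
	#include <rte_ethdev.h>

	static bool
	can_xmit(const struct rte_mbuf *m, uint64_t tx_offloads)
	{
		/* Single-segment is always fine; chains need the offload. */
		return m->nb_segs == 1 ||
		       (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0;
	}
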
diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index d695c5eef7b0..ec29fd6bc53c 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -136,10 +136,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
@@ -659,7 +659,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -714,7 +714,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 4cc002ee8fab..047010e15ed0 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -22,15 +22,15 @@ struct pfe_vdev_init_params {
 static struct pfe *g_pfe;
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -601,9 +601,9 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	}
 
 	link.link_status = lstatus;
-	link.link_speed = ETH_LINK_SPEED_1G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_speed = RTE_ETH_SPEED_NUM_1G; /* SPEED_NUM, not a capability bit */
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	pfe_eth_atomic_write_link_status(dev, &link);
 
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 6667c2d7ab6d..511742c6a1b3 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -65,8 +65,8 @@ typedef u32 offsize_t;      /* In DWORDS !!! */
 struct eth_phy_cfg {
 /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
 	u32 speed;
-#define ETH_SPEED_AUTONEG   0
-#define ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
+#define RTE_ETH_SPEED_AUTONEG   0
+#define RTE_ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
 
 	u32 pause;      /* bitmask */
 #define ETH_PAUSE_NONE		0x0
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 27f6932dc74e..c907d7fd8312 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -342,9 +342,9 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
 	}
 
 	use_tx_offload = !!(tx_offloads &
-			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
-			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
-			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+			    (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
 
 	if (use_tx_offload) {
 		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
@@ -1002,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			(void)qede_vlan_stripping(eth_dev, 1);
 		else
 			(void)qede_vlan_stripping(eth_dev, 0);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN filtering kicks in when a VLAN is added */
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			qede_vlan_filter_set(eth_dev, 0, 1);
 		} else {
 			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1022,7 +1022,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 				 * enabled
 				 */
 				eth_dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_VLAN_FILTER;
+						RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			} else {
 				qede_vlan_filter_set(eth_dev, 0, 0);
 			}
@@ -1069,11 +1069,11 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	/* Configure default RETA */
 	memset(reta_conf, 0, sizeof(reta_conf));
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-		id = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		id = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		q = i % QEDE_RSS_COUNT(eth_dev);
 		reta_conf[id].reta[pos] = q;
 	}
@@ -1112,12 +1112,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure TPA parameters */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (qede_enable_tpa(eth_dev, true))
 			return -EINVAL;
 		/* Enable scatter mode for LRO */
 		if (!eth_dev->data->scattered_rx)
-			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	}
 
 	/* Start queues */
@@ -1132,7 +1132,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	 * Also, we would like to retain similar behavior in PF case, so we
 	 * don't do PF/VF specific check here.
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1272,8 +1272,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
@@ -1291,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_NOTICE(edev, false,
 			  "Invalid devargs supplied, requested change will not take effect\n");
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+	if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+	      rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
 		DP_ERR(edev, "Unsupported multi-queue mode\n");
 		return -ENOTSUP;
 	}
@@ -1312,7 +1312,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 			return -ENOMEM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 
 	if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1321,8 +1321,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = eth_dev->data->mtu;
 
 	/* Enable VLAN offloads by default */
-	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-					     ETH_VLAN_FILTER_MASK);
+	ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK  |
+					     RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1385,34 +1385,34 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_RX_OFFLOAD_UDP_CKSUM	|
-				     DEV_RX_OFFLOAD_TCP_CKSUM	|
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_LRO	|
-				     DEV_RX_OFFLOAD_KEEP_CRC    |
-				     DEV_RX_OFFLOAD_SCATTER	|
-				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP  |
-				     DEV_RX_OFFLOAD_RSS_HASH);
+	dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO	|
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+				     RTE_ETH_RX_OFFLOAD_SCATTER	|
+				     RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
 	 * to both at port and queue levels.
 	 */
-	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
-				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_TX_OFFLOAD_UDP_CKSUM	|
-				     DEV_TX_OFFLOAD_TCP_CKSUM	|
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_MULTI_SEGS  |
-				     DEV_TX_OFFLOAD_TCP_TSO	|
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT	|
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO	|
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	};
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1424,17 +1424,17 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-		speed_cap |= ETH_LINK_SPEED_1G;
+		speed_cap |= RTE_ETH_LINK_SPEED_1G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-		speed_cap |= ETH_LINK_SPEED_10G;
+		speed_cap |= RTE_ETH_LINK_SPEED_10G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-		speed_cap |= ETH_LINK_SPEED_25G;
+		speed_cap |= RTE_ETH_LINK_SPEED_25G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-		speed_cap |= ETH_LINK_SPEED_40G;
+		speed_cap |= RTE_ETH_LINK_SPEED_40G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-		speed_cap |= ETH_LINK_SPEED_50G;
+		speed_cap |= RTE_ETH_LINK_SPEED_50G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-		speed_cap |= ETH_LINK_SPEED_100G;
+		speed_cap |= RTE_ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
 
 	return 0;
@@ -1461,10 +1461,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	/* Link Mode */
 	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
-		link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case QEDE_DUPLEX_FULL:
-		link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case QEDE_DUPLEX_UNKNOWN:
 	default:
@@ -1473,11 +1473,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	/* AN */
 	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+			     RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
 		link.link_speed, link.link_duplex,
@@ -2012,12 +2012,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
-	if (fc_conf->mode == RTE_FC_FULL)
+	if (fc_conf->mode == RTE_ETH_FC_FULL)
 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
 					QED_LINK_PAUSE_RX_ENABLE);
-	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
 	params.link_up = true;
@@ -2041,13 +2041,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
 					 QED_LINK_PAUSE_TX_ENABLE))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2088,14 +2088,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
 	*rss_caps = 0;
-	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -2221,7 +2221,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	uint8_t entry;
 	int rc = 0;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
 		       reta_size);
 		return -EINVAL;
@@ -2245,8 +2245,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 
 	for_each_hwfn(edev, i) {
 		for (j = 0; j < reta_size; j++) {
-			idx = j / RTE_RETA_GROUP_SIZE;
-			shift = j % RTE_RETA_GROUP_SIZE;
+			idx = j / RTE_ETH_RETA_GROUP_SIZE;
+			shift = j % RTE_ETH_RETA_GROUP_SIZE;
 			if (reta_conf[idx].mask & (1ULL << shift)) {
 				entry = reta_conf[idx].reta[shift];
 				fid = entry * edev->num_hwfns + i;
@@ -2282,15 +2282,15 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	uint16_t i, idx, shift;
 	uint8_t entry;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported\n",
 		       reta_size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift)) {
 			entry = qdev->rss_ind_table[i];
 			reta_conf[idx].reta[shift] = entry;
@@ -2718,16 +2718,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	adapter->ipgre.num_filters = 0;
 	if (is_vf) {
 		adapter->vxlan.enable = true;
-		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
 		adapter->geneve.enable = true;
-		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					      ETH_TUNNEL_FILTER_IVLAN;
+		adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					      RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
 		adapter->ipgre.enable = true;
-		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 	} else {
 		adapter->vxlan.enable = false;
 		adapter->geneve.enable = false;
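
The RETA hunks above keep the standard ethdev indexing: entry i of the
redirection table lives in group i / RTE_ETH_RETA_GROUP_SIZE at bit
i % RTE_ETH_RETA_GROUP_SIZE. For reference, a minimal application-side
sketch with the renamed constants, assuming <rte_ethdev.h> and
<string.h>; the function name and the round-robin fill are illustrative
only:

static int
example_reta_setup(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
						  RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		/* select entry 'i' and point it at a queue */
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_rxq;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   RTE_ETH_RSS_RETA_SIZE_128);
}
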
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index c756594bfc4b..440440423a32 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -20,97 +20,97 @@ const struct _qede_udp_tunn_types {
 	const char *string;
 } qede_tunn_types[] = {
 	{
-		ETH_TUNNEL_FILTER_OMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC,
 		ECORE_FILTER_MAC,
 		ECORE_TUNN_CLSS_MAC_VLAN,
 		"outer-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_VNI,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_VLAN,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"outer-mac and vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VNI,
 		"vni and inner-mac",
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"vni and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_OIP,
+		RTE_ETH_TUNNEL_FILTER_OIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-IP"
 	},
 	{
-		ETH_TUNNEL_FILTER_IIP,
+		RTE_ETH_TUNNEL_FILTER_IIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"inner-IP"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"OMAC_TENID_IMAC"
@@ -144,7 +144,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
 
 	/* check FDIR modes */
 	switch (fdir->mode) {
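
The qede_udp_dst_port_add()/qede_udp_dst_port_del() hunks that follow
implement the generic ethdev UDP tunnel hooks; a minimal caller-side
sketch with the renamed tunnel type, assuming <rte_ethdev.h> (the
wrapper name is illustrative, 4789 is the IANA-assigned VXLAN port):

static int
example_vxlan_port_add(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	/* the PMD rejects ports it already has, as seen below */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}
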
@@ -542,7 +542,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -570,7 +570,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 					ECORE_TUNN_CLSS_MAC_VLAN, false);
 
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -622,7 +622,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for VXLAN was already configured\n",
@@ -659,7 +659,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
 		qdev->vxlan.udp_port = udp_port;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for GENEVE was already configured\n",
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index c2263787b4ec..d585db8b61e8 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -249,7 +249,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 	/* cache align the mbuf size to simplify rx_buf_size calculation */
 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)	||
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	||
 	    (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
 		if (!dev->data->scattered_rx) {
 			DP_INFO(edev, "Forcing scatter-gather mode\n");
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index c9334448c887..15112b83f4f7 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -73,14 +73,14 @@
 #define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
 #define QEDE_ETH_MAX_LEN	(RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
 
-#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
-				 ETH_RSS_NONFRAG_IPV4_TCP	|\
-				 ETH_RSS_NONFRAG_IPV4_UDP	|\
-				 ETH_RSS_IPV6			|\
-				 ETH_RSS_NONFRAG_IPV6_TCP	|\
-				 ETH_RSS_NONFRAG_IPV6_UDP	|\
-				 ETH_RSS_VXLAN			|\
-				 ETH_RSS_GENEVE)
+#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4			|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_UDP	|\
+				 RTE_ETH_RSS_IPV6			|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_UDP	|\
+				 RTE_ETH_RSS_VXLAN			|\
+				 RTE_ETH_RSS_GENEVE)
 
 #define QEDE_RXTX_MAX(qdev) \
 	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
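
QEDE_RSS_OFFLOAD_ALL is what the driver advertises in
dev_info->flow_type_rss_offloads; the matching application-side
configuration with the renamed flags might look as below (a sketch,
assuming <rte_ethdev.h>; rss_hf should still be masked against the
advertised capabilities before rte_eth_dev_configure()):

static const struct rte_eth_conf example_port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* use the PMD default key */
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
				  RTE_ETH_RSS_UDP,
		},
	},
};
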
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 0440019e07e1..db10f035dfcb 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -56,10 +56,10 @@ struct pmd_internals {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
@@ -102,7 +102,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -110,21 +110,21 @@ static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	dev->data->dev_started = 0;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_down(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_up(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -163,8 +163,8 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
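
The ring PMD keeps a static rte_eth_link; on the application side the
same renamed constants come back from a link query. A minimal sketch,
assuming <rte_ethdev.h> and <stdio.h> (the function name is
illustrative):

static void
example_link_report(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP &&
	    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
		printf("port %u: up, %u Mbps\n", port_id, link.link_speed);
}
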
 
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 431c42f508d0..9c1be10ac93d 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -106,13 +106,13 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 {
 	uint32_t phy_caps = 0;
 
-	if (~speeds & ETH_LINK_SPEED_FIXED) {
+	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		phy_caps |= (1 << EFX_PHY_CAP_AN);
 		/*
 		 * If no speeds are specified in the mask, any supported
 		 * may be negotiated
 		 */
-		if (speeds == ETH_LINK_SPEED_AUTONEG)
+		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 			phy_caps |=
 				(1 << EFX_PHY_CAP_1000FDX) |
 				(1 << EFX_PHY_CAP_10000FDX) |
@@ -121,17 +121,17 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 				(1 << EFX_PHY_CAP_50000FDX) |
 				(1 << EFX_PHY_CAP_100000FDX);
 	}
-	if (speeds & ETH_LINK_SPEED_1G)
+	if (speeds & RTE_ETH_LINK_SPEED_1G)
 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
-	if (speeds & ETH_LINK_SPEED_10G)
+	if (speeds & RTE_ETH_LINK_SPEED_10G)
 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
-	if (speeds & ETH_LINK_SPEED_25G)
+	if (speeds & RTE_ETH_LINK_SPEED_25G)
 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
-	if (speeds & ETH_LINK_SPEED_40G)
+	if (speeds & RTE_ETH_LINK_SPEED_40G)
 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
-	if (speeds & ETH_LINK_SPEED_50G)
+	if (speeds & RTE_ETH_LINK_SPEED_50G)
 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
-	if (speeds & ETH_LINK_SPEED_100G)
+	if (speeds & RTE_ETH_LINK_SPEED_100G)
 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
 
 	return phy_caps;
@@ -401,10 +401,10 @@ sfc_set_fw_subvariant(struct sfc_adapter *sa)
 			tx_offloads |= txq_info->offloads;
 	}
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
 	else
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
@@ -899,7 +899,7 @@ sfc_attach(struct sfc_adapter *sa)
 	sa->priv.shared->tunnel_encaps =
 		encp->enc_tunnel_encapsulations_supported;
 
-	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
 			  encp->enc_tso_v3_enabled;
 		if (!sa->tso)
@@ -908,8 +908,8 @@ sfc_attach(struct sfc_adapter *sa)
 
 	if (sa->tso &&
 	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
-	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
+	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
 		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
 				encp->enc_tso_v3_enabled;
 		if (!sa->tso_encap)
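
sfc_attach() above gates TSO on the datapath capability mask; the
application-side mirror of that check, as a sketch (assuming
<rte_ethdev.h>; the function name is illustrative):

static uint64_t
example_tso_offload(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	/* request TSO only when the port advertises it */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		return RTE_ETH_TX_OFFLOAD_TCP_TSO;
	return 0;
}
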
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index d958fd642fb1..eeb73a7530ef 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -979,11 +979,11 @@ struct sfc_dp_rx sfc_ef100_rx = {
 				  SFC_DP_RX_FEAT_INTR |
 				  SFC_DP_RX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_RX_OFFLOAD_SCATTER |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_SCATTER |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.get_dev_info		= sfc_ef100_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
 	.qcreate		= sfc_ef100_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index e166fda888b1..67980a587fe4 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -971,16 +971,16 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS |
 				  SFC_DP_TX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_MULTI_SEGS |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 991329e86f01..9ea207cca163 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -746,8 +746,8 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
 				  SFC_DP_RX_FEAT_FLOW_MARK,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.queue_offload_capa	= 0,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
 	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 49a7d4fb42fd..9aaabd30eee6 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -819,10 +819,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ed43adb4ca5c..e7da4608bcb0 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -958,9 +958,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+	if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1125,14 +1125,14 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
@@ -1152,11 +1152,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
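
The .dev_offload_capa/.queue_offload_capa split above surfaces to
applications as tx_offload_capa vs tx_queue_offload_capa; a sketch of
consuming the queue-level mask at queue setup time (assuming
<rte_ethdev.h>; the function name is illustrative):

static int
example_txq_setup(uint16_t port_id, uint16_t queue_id, uint16_t nb_txd,
		  const struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_txconf txconf = dev_info->default_txconf;

	/* request fast-free if the queue-level capa advertises it */
	if (dev_info->tx_queue_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txconf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}
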
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index f5986b610fff..833d833a0408 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -105,19 +105,19 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = sa->sriov.num_vfs;
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->max_rx_queues = sa->rxq_max;
 	dev_info->max_tx_queues = sa->txq_max;
@@ -145,8 +145,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
 				    dev_info->tx_queue_offload_capa;
 
-	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->default_txconf.offloads |= txq_offloads_def;
 
@@ -989,16 +989,16 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	switch (link_fc) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case EFX_FCNTL_RESPOND:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case EFX_FCNTL_GENERATE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	default:
 		sfc_err(sa, "%s: unexpected flow control value %#x",
@@ -1029,16 +1029,16 @@ sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		fcntl = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		fcntl = EFX_FCNTL_RESPOND;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		fcntl = EFX_FCNTL_GENERATE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
 		break;
 	default:
@@ -1313,7 +1313,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
-		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		qinfo->scattered_rx = 1;
 	}
 	qinfo->nb_desc = rxq_info->entries;
@@ -1523,9 +1523,9 @@ static efx_tunnel_protocol_t
 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
 {
 	switch (rte_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		return EFX_TUNNEL_PROTOCOL_VXLAN;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		return EFX_TUNNEL_PROTOCOL_GENEVE;
 	default:
 		return EFX_TUNNEL_NPROTOS;
@@ -1652,7 +1652,7 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	/*
 	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
-	 * hence, conversion is done here to derive a correct set of ETH_RSS
+	 * hence, conversion is done here to derive a correct set of RTE_ETH_RSS
 	 * flags which corresponds to the active EFX configuration stored
 	 * locally in 'sfc_adapter' and kept up-to-date
 	 */
@@ -1778,8 +1778,8 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp = entry / RTE_RETA_GROUP_SIZE;
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 
 		if ((reta_conf[grp].mask >> grp_idx) & 1)
 			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
@@ -1828,10 +1828,10 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 		struct rte_eth_rss_reta_entry64 *grp;
 
-		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+		grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
 
 		if (grp->mask & (1ull << grp_idx)) {
 			if (grp->reta[grp_idx] >= rss->channels) {
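
sfc_flow_ctrl_get()/sfc_flow_ctrl_set() above translate between
EFX_FCNTL_* and the renamed RTE_ETH_FC_* modes; the caller side,
sketched (assuming <rte_ethdev.h> and <string.h>; the function name is
illustrative):

static int
example_enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	/* keep the queried watermarks, change only the mode */
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
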
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 8096af56739f..be2dfe778a0d 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -392,7 +392,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vlan *spec = NULL;
 	const struct rte_flow_item_vlan *mask = NULL;
 	const struct rte_flow_item_vlan supp_mask = {
-		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
 		.inner_type = RTE_BE16(0xffff),
 	};
 
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index 5320d8903dac..27b02b1119fb 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -573,66 +573,66 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
 
 	memset(link_info, 0, sizeof(*link_info));
 	if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
-		link_info->link_status = ETH_LINK_DOWN;
+		link_info->link_status = RTE_ETH_LINK_DOWN;
 	else
-		link_info->link_status = ETH_LINK_UP;
+		link_info->link_status = RTE_ETH_LINK_UP;
 
 	switch (link_mode) {
 	case EFX_LINK_10HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_10FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_100FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_1000HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_1000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_10000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_25000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_25G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_25G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_40000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_40G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_40G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_50000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_50G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_50G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	default:
 		SFC_ASSERT(B_FALSE);
 		/* FALLTHROUGH */
 	case EFX_LINK_UNKNOWN:
 	case EFX_LINK_DOWN:
-		link_info->link_speed  = ETH_SPEED_NUM_NONE;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_NONE;
 		link_info->link_duplex = 0;
 		break;
 	}
 
-	link_info->link_autoneg = ETH_LINK_AUTONEG;
+	link_info->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 int
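
Since the RTE_ETH_SPEED_NUM_* values used in the mapping above are
plain Mbps counts, a caller that only wants a printable form can use
the ethdev helper; a sketch, assuming <rte_ethdev.h>, <stdio.h> and a
'link' variable filled in by a link query:

	printf("speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
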
diff --git a/drivers/net/sfc/sfc_repr.c b/drivers/net/sfc/sfc_repr.c
index 2500b14cb006..9d88d554c1ba 100644
--- a/drivers/net/sfc/sfc_repr.c
+++ b/drivers/net/sfc/sfc_repr.c
@@ -405,7 +405,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 	}
 
 	switch (conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (nb_rx_queues != 1) {
 			sfcr_err(sr, "Rx RSS is not supported with %u queues",
 				 nb_rx_queues);
@@ -420,7 +420,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 			ret = -EINVAL;
 		}
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		break;
 	default:
 		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
@@ -428,7 +428,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 		break;
 	}
 
-	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
+	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
 		sfcr_err(sr, "Tx mode MQ modes not supported");
 		ret = -EINVAL;
 	}
@@ -553,8 +553,8 @@ sfc_repr_dev_link_update(struct rte_eth_dev *dev,
 		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
 	} else {
 		memset(&link, 0, sizeof(link));
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c60ef17a922a..23df27c8f45a 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -648,9 +648,9 @@ struct sfc_dp_rx sfc_efx_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
 	},
 	.features		= SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
 	.qcreate		= sfc_efx_rx_qcreate,
 	.qdestroy		= sfc_efx_rx_qdestroy,
@@ -931,7 +931,7 @@ sfc_rx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (encp->enc_tunnel_encapsulations_supported == 0)
-		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	return ~no_caps;
 }
@@ -1140,7 +1140,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 
 	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
 				  encp->enc_rx_prefix_size,
-				  (offloads & DEV_RX_OFFLOAD_SCATTER),
+				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
 				  encp->enc_rx_scatter_max,
 				  &error)) {
 		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
@@ -1166,15 +1166,15 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags |=
-		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
 	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
-	     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
 
-	if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
 
 	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
@@ -1211,7 +1211,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	rxq_info->refill_mb_pool = mb_pool;
 
 	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
-	    (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
 	else
 		rxq_info->rxq_flags = 0;
@@ -1313,19 +1313,19 @@ sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
  * Mapping between RTE RSS hash functions and their EFX counterparts.
  */
 static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
-	{ ETH_RSS_NONFRAG_IPV4_TCP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
 	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
 	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
-	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV4, 2TUPLE) },
-	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
-	  ETH_RSS_IPV6_EX,
+	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+	  RTE_ETH_RSS_IPV6_EX,
 	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV6, 2TUPLE) }
 };
@@ -1645,10 +1645,10 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	int rc = 0;
 
 	switch (rxmode->mq_mode) {
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/* No special checks are required */
 		break;
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
 			sfc_err(sa, "RSS is not available");
 			rc = EINVAL;
@@ -1665,16 +1665,16 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	 * so unsupported offloads cannot be added as the result of
 	 * below check.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
-	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 	}
 
 	return rc;
@@ -1820,7 +1820,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	}
 
 configure_rss:
-	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
 			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
 	if (rss->channels > 0) {
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 13392cdd5a09..0273788c20ce 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -54,23 +54,23 @@ sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (!encp->enc_hw_tx_insert_vlan_enabled)
-		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (!encp->enc_tunnel_encapsulations_supported)
-		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	if (!sa->tso)
-		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	return ~no_caps;
 }
@@ -114,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -309,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;
 
 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -529,23 +529,23 @@ sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 	if (rc != 0)
 		goto fail_ev_qstart;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_IPV4;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -876,9 +876,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/*
 		 * Here VLAN TCI is expected to be zero in case no
-		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
@@ -1242,13 +1242,13 @@ struct sfc_dp_tx sfc_efx_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
 	},
 	.features		= 0,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO,
 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
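
sfc_tx_qcheck_conf() above rejects configurations that set only one of
the two L4 checksum offloads; a caller-side sketch honoring that
constraint ('dev_info' and 'port_conf' stand for the usual queried and
under-construction structures):

	const uint64_t l4_csum = RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM;

	/* request TCP and UDP checksum offloads only as a pair */
	if ((dev_info.tx_offload_capa & l4_csum) == l4_csum)
		port_conf.txmode.offloads |= l4_csum;
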
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b3b55b9035b1..3ef33818a9e0 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -173,7 +173,7 @@ pmd_dev_start(struct rte_eth_dev *dev)
 		return status;
 
 	/* Link UP */
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -184,7 +184,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
 	struct pmd_internals *p = dev->data->dev_private;
 
 	/* Link DOWN */
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	/* Firmware */
 	softnic_pipeline_disable_all(p);
@@ -386,10 +386,10 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
 
 	/* dev->data */
 	dev->data->dev_private = dev_private;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	dev->data->mac_addrs = &eth_addr;
 	dev->data->promiscuous = 1;
 	dev->data->numa_node = params->cpu_id;
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 3c6a285e3c5e..6a084e3e1b1b 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1042,7 +1042,7 @@ static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_dev_data *data = dev->data;
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
 		data->scattered_rx = 1;
 	} else {
@@ -1064,11 +1064,11 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = internals->max_rx_queues;
 	dev_info->max_tx_queues = internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = 0;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->tx_queue_offload_capa = 0;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1202,10 +1202,10 @@ eth_link_update(struct rte_eth_dev *dev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_speed = ETH_SPEED_NUM_100G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_status = ETH_LINK_UP;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	rte_eth_linkstatus_set(dev, &link);
 	return 0;
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index e4f1ad45219e..5d5350d78e03 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -70,16 +70,16 @@
 
 #define TAP_IOV_DEFAULT_MAX 1024
 
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER |	\
-			DEV_RX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_RX_OFFLOAD_UDP_CKSUM |	\
-			DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER |	\
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS |	\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_TX_OFFLOAD_UDP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |	\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 static int tap_devices_count;
 
@@ -97,10 +97,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;	/* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -433,7 +433,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		len = readv(process_private->rxq_fds[rxq->queue_id],
 			*rxq->iovecs,
-			1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+			1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
 			     rxq->nb_rx_desc : 1));
 		if (len < (int)sizeof(struct tun_pi))
 			break;
@@ -489,7 +489,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		seg->next = NULL;
 		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
 						      RTE_PTYPE_ALL_MASK);
-		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 			tap_verify_csum(mbuf);
 
 		/* account for the receive frame */
@@ -866,7 +866,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -876,7 +876,7 @@ tap_link_set_up(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
@@ -956,30 +956,30 @@ tap_dev_speed_capa(void)
 	uint32_t speed = pmd_link.link_speed;
 	uint32_t capa = 0;
 
-	if (speed >= ETH_SPEED_NUM_10M)
-		capa |= ETH_LINK_SPEED_10M;
-	if (speed >= ETH_SPEED_NUM_100M)
-		capa |= ETH_LINK_SPEED_100M;
-	if (speed >= ETH_SPEED_NUM_1G)
-		capa |= ETH_LINK_SPEED_1G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_2_5G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_5G;
-	if (speed >= ETH_SPEED_NUM_10G)
-		capa |= ETH_LINK_SPEED_10G;
-	if (speed >= ETH_SPEED_NUM_20G)
-		capa |= ETH_LINK_SPEED_20G;
-	if (speed >= ETH_SPEED_NUM_25G)
-		capa |= ETH_LINK_SPEED_25G;
-	if (speed >= ETH_SPEED_NUM_40G)
-		capa |= ETH_LINK_SPEED_40G;
-	if (speed >= ETH_SPEED_NUM_50G)
-		capa |= ETH_LINK_SPEED_50G;
-	if (speed >= ETH_SPEED_NUM_56G)
-		capa |= ETH_LINK_SPEED_56G;
-	if (speed >= ETH_SPEED_NUM_100G)
-		capa |= ETH_LINK_SPEED_100G;
+	if (speed >= RTE_ETH_SPEED_NUM_10M)
+		capa |= RTE_ETH_LINK_SPEED_10M;
+	if (speed >= RTE_ETH_SPEED_NUM_100M)
+		capa |= RTE_ETH_LINK_SPEED_100M;
+	if (speed >= RTE_ETH_SPEED_NUM_1G)
+		capa |= RTE_ETH_LINK_SPEED_1G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_2_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_10G)
+		capa |= RTE_ETH_LINK_SPEED_10G;
+	if (speed >= RTE_ETH_SPEED_NUM_20G)
+		capa |= RTE_ETH_LINK_SPEED_20G;
+	if (speed >= RTE_ETH_SPEED_NUM_25G)
+		capa |= RTE_ETH_LINK_SPEED_25G;
+	if (speed >= RTE_ETH_SPEED_NUM_40G)
+		capa |= RTE_ETH_LINK_SPEED_40G;
+	if (speed >= RTE_ETH_SPEED_NUM_50G)
+		capa |= RTE_ETH_LINK_SPEED_50G;
+	if (speed >= RTE_ETH_SPEED_NUM_56G)
+		capa |= RTE_ETH_LINK_SPEED_56G;
+	if (speed >= RTE_ETH_SPEED_NUM_100G)
+		capa |= RTE_ETH_LINK_SPEED_100G;
 
 	return capa;
 }
@@ -1196,15 +1196,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
 		if (!(ifr.ifr_flags & IFF_UP) ||
 		    !(ifr.ifr_flags & IFF_RUNNING)) {
-			dev_link->link_status = ETH_LINK_DOWN;
+			dev_link->link_status = RTE_ETH_LINK_DOWN;
 			return 0;
 		}
 	}
 	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
 	dev_link->link_status =
 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-		 ETH_LINK_UP :
-		 ETH_LINK_DOWN);
+		 RTE_ETH_LINK_UP :
+		 RTE_ETH_LINK_DOWN);
 	return 0;
 }
 
@@ -1391,7 +1391,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 	int ret;
 
 	/* initialize GSO context */
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (!pmd->gso_ctx_mp) {
 		/*
 		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1606,9 +1606,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->csum = !!(offloads &
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM));
+			(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
@@ -1760,7 +1760,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	fc_conf->mode = RTE_FC_NONE;
+	fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1768,7 +1768,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	if (fc_conf->mode != RTE_FC_NONE)
+	if (fc_conf->mode != RTE_ETH_FC_NONE)
 		return -ENOTSUP;
 	return 0;
 }
@@ -2262,7 +2262,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
 			}
 		}
 	}
-	pmd_link.link_speed = ETH_SPEED_NUM_10G;
+	pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
 	TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2436,7 +2436,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 		return 0;
 	}
 
-	speed = ETH_SPEED_NUM_10G;
+	speed = RTE_ETH_SPEED_NUM_10G;
 
 	/* use tap%d which causes kernel to choose next available */
 	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
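
tap_dev_speed_capa() above ORs in every capability bit up to the
configured speed; for a single speed value, ethdev's own helper does
the numeric-to-bitflag conversion. A one-call sketch, assuming
<rte_ethdev.h>:

	uint32_t bit = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
					     RTE_ETH_LINK_FULL_DUPLEX);
	/* bit == RTE_ETH_LINK_SPEED_10G */
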
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 176e7180bdaa..48c151cf6b68 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -13,7 +13,7 @@
 #define TAP_RSS_HASH_KEY_SIZE 40
 
 /* Supported RSS */
-#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define TAP_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))
 
 /* hashed fields for RSS */
 enum hash_field {
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 8ce9a99dc074..762647e3b6ee 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -61,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	if (nic->duplex == NICVF_HALF_DUPLEX)
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -134,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
 			nicvf_link_status_update(nic, &link);
-			if (link.link_status == ETH_LINK_UP)
+			if (link.link_status == RTE_ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
@@ -390,35 +390,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
 	uint64_t nic_rss = 0;
 
-	if (ethdev_rss & ETH_RSS_IPV4)
+	if (ethdev_rss & RTE_ETH_RSS_IPV4)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_IPV6)
+	if (ethdev_rss & RTE_ETH_RSS_IPV6)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-		if (ethdev_rss & ETH_RSS_VXLAN)
+		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 			nic_rss |= RSS_TUN_VXLAN_ENA;
 
-		if (ethdev_rss & ETH_RSS_GENEVE)
+		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 			nic_rss |= RSS_TUN_GENEVE_ENA;
 
-		if (ethdev_rss & ETH_RSS_NVGRE)
+		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 			nic_rss |= RSS_TUN_NVGRE_ENA;
 	}
 
@@ -431,28 +431,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
 	uint64_t ethdev_rss = 0;
 
 	if (nic_rss & RSS_IP_ENA)
-		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-				ETH_RSS_NONFRAG_IPV6_TCP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-				ETH_RSS_NONFRAG_IPV6_UDP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-		ethdev_rss |= ETH_RSS_PORT;
+		ethdev_rss |= RTE_ETH_RSS_PORT;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
 		if (nic_rss & RSS_TUN_VXLAN_ENA)
-			ethdev_rss |= ETH_RSS_VXLAN;
+			ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
 		if (nic_rss & RSS_TUN_GENEVE_ENA)
-			ethdev_rss |= ETH_RSS_GENEVE;
+			ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
 		if (nic_rss & RSS_TUN_NVGRE_ENA)
-			ethdev_rss |= ETH_RSS_NVGRE;
+			ethdev_rss |= RTE_ETH_RSS_NVGRE;
 	}
 	return ethdev_rss;
 }
@@ -479,8 +479,8 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = tbl[j];
 	}
@@ -509,8 +509,8 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				tbl[j] = reta_conf[i].reta[j];
 	}
@@ -807,9 +807,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
 		    dev->data->nb_rx_queues,
 		    dev->data->dev_conf.lpbk_mode, rsshf);
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		ret = nicvf_rss_term(nic);
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
 	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -870,7 +870,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -992,7 +992,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->offloads = offloads;
 
-	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1382,11 +1382,11 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+				 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
@@ -1415,10 +1415,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
-			DEV_TX_OFFLOAD_UDP_CKSUM          |
-			DEV_TX_OFFLOAD_TCP_CKSUM,
+		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM          |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 	};
 
 	return 0;
@@ -1582,8 +1582,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1711,7 +1711,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	/* Setup scatter mode if needed by jumbo */
 	if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU */
@@ -1896,8 +1896,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!rte_eal_has_hugepages()) {
 		PMD_INIT_LOG(INFO, "Huge page is not configured");
@@ -1909,8 +1909,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
@@ -1920,7 +1920,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -1955,7 +1955,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic->offload_cksum = 1;
 
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -2032,8 +2032,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			nicvf_vlan_hw_strip(nic, true);
 		else
 			nicvf_vlan_hw_strip(nic, false);
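
The RETA copy loops above now index with RTE_ETH_RETA_GROUP_SIZE, and the application side uses the same idx/shift arithmetic. A sketch, assuming port_id is a started port whose RETA holds at most 512 entries:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static int
print_reta(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -EINVAL;

	/* Select every entry we want reported, one mask bit per entry. */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++)
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			UINT64_C(1) << (i % RTE_ETH_RETA_GROUP_SIZE);

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, dev_info.reta_size);
	if (ret != 0)
		return ret;

	for (i = 0; i < dev_info.reta_size; i++)
		printf("entry %u -> queue %u\n", i,
		       reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE]);
	return 0;
}
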
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 5d38750d6313..cb474e26b81e 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -16,32 +16,32 @@
 #define NICVF_UNKNOWN_DUPLEX		0xff
 
 #define NICVF_RSS_OFFLOAD_PASS1 ( \
-	ETH_RSS_PORT | \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_PORT | \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define NICVF_RSS_OFFLOAD_TUNNEL ( \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
 
 #define NICVF_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
-	DEV_TX_OFFLOAD_UDP_CKSUM        | \
-	DEV_TX_OFFLOAD_TCP_CKSUM        | \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define NICVF_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM    | \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM    | \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
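
Capability masks such as NICVF_TX_OFFLOAD_CAPA are surfaced to applications as dev_info.tx_offload_capa, so a requested offload set can be pre-validated before rte_eth_dev_configure(). A sketch (the helper name is illustrative); calling it with RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_MULTI_SEGS would succeed against the capabilities above:

#include <errno.h>
#include <rte_ethdev.h>

/* Return 0 when every requested Tx offload bit is advertised by the port. */
static int
check_tx_offloads(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	return (requested & ~dev_info.tx_offload_capa) == 0 ? 0 : -ENOTSUP;
}
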
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 7b46ffb68635..0b0f9db7cb2a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -998,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 			!(rxcfg & TXGBE_RXCFG_VLAN);
 		rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1033,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (vlan_ext) {
 			wr32m(hw, TXGBE_VLANCTL,
 				TXGBE_VLANCTL_TPID_MASK,
@@ -1053,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				TXGBE_TAGTPID_LSB(tpid));
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (vlan_ext) {
 			/* Only the high 16-bits is valid */
 			wr32m(hw, TXGBE_EXTAG,
@@ -1138,10 +1138,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -1240,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			txgbe_vlan_strip_queue_set(dev, i, 1);
 		else
 			txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1254,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct txgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -1275,25 +1275,25 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		txgbe_vlan_hw_strip_config(dev);
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			txgbe_vlan_hw_filter_enable(dev);
 		else
 			txgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			txgbe_vlan_hw_extend_enable(dev);
 		else
 			txgbe_vlan_hw_extend_disable(dev);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			txgbe_qinq_hw_strip_enable(dev);
 		else
 			txgbe_qinq_hw_strip_disable(dev);
@@ -1331,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -1357,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1378,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode =
-				ETH_MQ_RX_VMDQ_ONLY;
+				RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -1393,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
 			dev->data->dev_conf.txmode.mq_mode =
-				ETH_MQ_TX_VMDQ_ONLY;
+				RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -1414,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdb+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1429,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1446,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -1495,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = txgbe_check_mq_mode(dev);
@@ -1694,15 +1694,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		txgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1763,8 +1763,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	if (err)
 		goto error;
 
-	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
@@ -1773,20 +1773,20 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
 	}
 
@@ -2601,7 +2601,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2634,11 +2634,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_desc_lim = tx_desc_lim;
 
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -2713,8 +2713,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -2733,34 +2733,34 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	}
 
 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case TXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case TXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -2990,7 +2990,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3221,13 +3221,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3359,16 +3359,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3400,16 +3400,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3576,12 +3576,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
 		}
@@ -3605,15 +3605,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= TXGBE_POOLETHCTL_UTA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= TXGBE_POOLETHCTL_MCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= TXGBE_POOLETHCTL_UCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= TXGBE_POOLETHCTL_BCA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= TXGBE_POOLETHCTL_MCP;
 
 	return new_val;
@@ -4264,15 +4264,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = TXGBE_INCVAL_100;
 		shift = TXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = TXGBE_INCVAL_1GB;
 		shift = TXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = TXGBE_INCVAL_10GB;
 		shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4628,7 +4628,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -4639,7 +4639,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -4663,9 +4663,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4678,7 +4678,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4908,7 +4908,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -4939,7 +4939,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -4979,7 +4979,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4987,7 +4987,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4995,7 +4995,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5003,7 +5003,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5035,7 +5035,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5045,7 +5045,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5055,7 +5055,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5065,7 +5065,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
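
The tunnel-type switches above pair with rte_eth_dev_udp_tunnel_port_add() on the application side. A minimal sketch using the renamed enum; 4789 is the IANA-assigned VXLAN port and port_id is assumed valid:

#include <rte_ethdev.h>

static int
add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	/* Lands in the RTE_ETH_TUNNEL_TYPE_VXLAN case of the driver switch. */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
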
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index fd65d89ffe7d..8304b68292da 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -60,15 +60,15 @@
 #define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
 #define TXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
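
TXGBE_RSS_OFFLOAD_ALL is exported as dev_info.flow_type_rss_offloads, and a hash update requesting bits outside it is rejected. A sketch enabling an IPv4/TCP subset with the renamed flags; the NULL key keeps the currently configured one:

#include <rte_ethdev.h>

static int
enable_ipv4_tcp_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the configured hash key */
		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
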
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 43dc0ed39b75..283b52e8f3db 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -486,14 +486,14 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -574,22 +574,22 @@ txgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -647,8 +647,8 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
 	txgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -891,10 +891,10 @@ txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			txgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
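
The RTE_ETH_VLAN_*_MASK bits built in the driver above use the same encoding applications pass to rte_eth_dev_set_vlan_offload(). A sketch turning on stripping at runtime while preserving the other offload bits:

#include <rte_ethdev.h>

static int
enable_vlan_strip(uint16_t port_id)
{
	int cur = rte_eth_dev_get_vlan_offload(port_id);

	if (cur < 0)
		return cur;
	return rte_eth_dev_set_vlan_offload(port_id,
			cur | RTE_ETH_VLAN_STRIP_OFFLOAD);
}
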
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 8abb86228608..e303d87176ed 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -102,22 +102,22 @@ txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf,
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
 		     uint32_t *fdirctrl, uint32_t *flex)
 {
 	*fdirctrl = 0;
 	*flex = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
 		break;
@@ -521,15 +521,15 @@ txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
@@ -564,15 +564,15 @@ txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_signature_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
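
The rte_fdir_conf/RTE_FDIR_PBALLOC_* renames above surface in the port configuration. A sketch of the relevant fragment, assuming the rest of rte_eth_conf stays at defaults:

#include <rte_ethdev.h>

/* Only pballoc is shown; RTE_ETH_FDIR_PBALLOC_64K gives 8k - 1 signature
 * filters per the comment in configure_fdir_flags() above. */
static const struct rte_eth_conf fdir_port_conf = {
	.fdir_conf = {
		.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
	},
};
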
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index eae400b14176..6d7fd1842843 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1215,7 +1215,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index ccd747973ba2..445733f3ba46 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -372,7 +372,7 @@ txgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -380,7 +380,7 @@ txgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -611,11 +611,11 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -634,7 +634,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= TXGBE_SECRXCTL_CRCSTRIP;
 	wr32(hw, TXGBE_SECRXCTL, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
 		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
 		if (reg != 0) {
@@ -642,7 +642,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
 		reg = rd32(hw, TXGBE_SECTXCTL);
 		if (reg != TXGBE_SECTXCTL_STFWD) {
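
txgbe_crypto_create_session() keys off the SECURITY offload flags, so inline IPsec requires them in the port configuration, subject to the driver's sanity checks above (no TCP LRO, no KEEP_CRC). A sketch of the minimal fragment:

#include <rte_ethdev.h>

static const struct rte_eth_conf ipsec_port_conf = {
	.rxmode = {
		.offloads = RTE_ETH_RX_OFFLOAD_SECURITY,
	},
	.txmode = {
		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY,
	},
};
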
diff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c
index a48972b1a381..30be2873307a 100644
--- a/drivers/net/txgbe/txgbe_pf.c
+++ b/drivers/net/txgbe/txgbe_pf.c
@@ -101,15 +101,15 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct txgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -256,13 +256,13 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	}
@@ -611,29 +611,29 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &eth_dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = TXGBE_DEV_HW(eth_dev);
 		vmvir = rd32(hw, TXGBE_POOLTAG(vf));
 		vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
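
The pool-sizing ladder in txgbe_pf_host_init() above keeps pools x queues-per-pool at 128: 32 or more VFs means 64 pools of 2 queues, 16 or more means 32 pools of 4, otherwise 16 pools of 8. A hypothetical helper restating that rule (the enum values equal their pool counts, e.g. RTE_ETH_32_POOLS == 32):

#include <stdint.h>

static uint16_t
txgbe_example_queues_per_pool(uint16_t vf_num)
{
	if (vf_num >= 32)	/* -> RTE_ETH_64_POOLS active */
		return 2;
	if (vf_num >= 16)	/* -> RTE_ETH_32_POOLS active */
		return 4;
	return 8;		/* -> RTE_ETH_16_POOLS active */
}
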
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 7e18dcce0a86..1204dc5499a5 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1960,7 +1960,7 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
-	return DEV_RX_OFFLOAD_VLAN_STRIP;
+	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 }
 
 uint64_t
@@ -1970,34 +1970,34 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_RSS_HASH |
-		   DEV_RX_OFFLOAD_SCATTER;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH    |
+		   RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	if (!txgbe_is_vf(dev))
-		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-			     DEV_RX_OFFLOAD_QINQ_STRIP |
-			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+		offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 
 	/*
 	 * RSC is only supported by PF devices in a non-SR-IOV
 	 * mode.
 	 */
 	if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == txgbe_mac_raptor)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
-	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -2222,32 +2222,32 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_UDP_TSO	   |
-		DEV_TX_OFFLOAD_UDP_TNL_TSO	|
-		DEV_TX_OFFLOAD_IP_TNL_TSO	|
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO	|
-		DEV_TX_OFFLOAD_GRE_TNL_TSO	|
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO	|
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO	|
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_UDP_TSO |
+		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (!txgbe_is_vf(dev))
-		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2349,7 +2349,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/* Modification to set tail pointer for virtual function
@@ -2599,7 +2599,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2900,20 +2900,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2930,20 +2930,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		mrqc &= ~TXGBE_RACTL_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_RACTL_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_RACTL_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_RACTL_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2984,39 +2984,39 @@ txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
 			rss_hf = 0;
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		if (mrqc & TXGBE_RACTL_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_RACTL_RSSENA))
 			rss_hf = 0;
 	}
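
txgbe_dev_rss_hash_conf_get() above reassembles rss_hf from TXGBE_RACTL, and applications read it back through the generic API. A sketch checking whether IPv6 hashing is active, using the renamed flags:

#include <rte_ethdev.h>

static int
rss_hashes_ipv6(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* hash fields only, skip the key */
	};
	int ret;

	ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (ret != 0)
		return ret;
	return (rss_conf.rss_hf &
		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX)) != 0;
}
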
@@ -3046,7 +3046,7 @@ txgbe_rss_configure(struct rte_eth_dev *dev)
 	 */
 	if (adapter->rss_reta_updated == 0) {
 		reta = 0;
-		for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 			if (j == dev->data->nb_rx_queues)
 				j = 0;
 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
@@ -3083,12 +3083,12 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		txgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * split rx buffer up into sections, each for 1 traffic class
@@ -3103,7 +3103,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
 
 		rxpbsize &= (~(0x3FF << 10));
@@ -3111,7 +3111,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 
-	if (num_pools == ETH_16_POOLS) {
+	if (num_pools == RTE_ETH_16_POOLS) {
 		mrqc = TXGBE_PORTCTL_NUMTC_8;
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 	} else {
@@ -3130,7 +3130,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	wr32(hw, TXGBE_POOLCTL, vt_ctl);
 
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3151,7 +3151,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_POOLRXENA(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_ETHADDRIDX, 0);
 	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
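
The nb_tcs arithmetic a few hunks up follows from the fixed VMDq+DCB
queue budget: RTE_ETH_VMDQ_DCB_NUM_QUEUES (128) queues are split across
the pools, so 16 pools leave 8 traffic classes and 32 pools leave 4.
As a hypothetical helper:

#include <rte_ethdev.h>

/* 128 VMDq+DCB queues divided between pools and traffic classes:
 * RTE_ETH_16_POOLS gives 8 TCs, RTE_ETH_32_POOLS gives 4 TCs. */
static uint8_t
tcs_per_pool(enum rte_eth_nb_pools num_pools)
{
	return (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
}
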
@@ -3221,7 +3221,7 @@ txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	/*PF VF Transmit Enable*/
 	wr32(hw, TXGBE_POOLTXENA(0),
 		vmdq_tx_conf->nb_queue_pools ==
-				ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+				RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	txgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3237,12 +3237,12 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3252,7 +3252,7 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3270,12 +3270,12 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3285,7 +3285,7 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3312,7 +3312,7 @@ txgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3339,7 +3339,7 @@ txgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3475,7 +3475,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/*
@@ -3486,8 +3486,8 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/*Configure general VMDQ and DCB RX parameters*/
 		txgbe_vmdq_dcb_configure(dev);
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -3500,7 +3500,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -3511,7 +3511,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -3527,15 +3527,15 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
-			if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -3576,7 +3576,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			wr32(hw, TXGBE_PBRXSIZE(i), 0);
 	}
 	if (config_dcb_tx) {
@@ -3592,7 +3592,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			wr32(hw, TXGBE_PBTXSIZE(i), 0);
 			wr32(hw, TXGBE_PBTXDMATH(i), 0);
 		}
@@ -3634,7 +3634,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/* If the TC count is 8,
@@ -3648,7 +3648,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = txgbe_dcb_pfc_enabled;
 		}
 		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
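
The PFC branch above only runs when the application opted in through
dcb_capability_en. On the application side that request could look like
the following sketch (the field values are illustrative assumptions,
not taken from this patch):

#include <rte_ethdev.h>

/* Hypothetical DCB+PFC port configuration under the new names. */
static const struct rte_eth_conf dcb_pfc_conf = {
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_DCB },
	.txmode = { .mq_mode = RTE_ETH_MQ_TX_DCB },
	.rx_adv_conf.dcb_rx_conf = {
		.nb_tcs = RTE_ETH_4_TCS,
		/* dcb_tc[] left zeroed: every user priority maps to TC 0 */
	},
	.tx_adv_conf.dcb_tx_conf = { .nb_tcs = RTE_ETH_4_TCS },
	.dcb_capability_en = RTE_ETH_DCB_PFC_SUPPORT,
};
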
@@ -3719,12 +3719,12 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -3780,7 +3780,7 @@ txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* pool enabling for receive - 64 */
 	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
 
 	/*
@@ -3904,11 +3904,11 @@ txgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
@@ -3931,15 +3931,15 @@ txgbe_config_vf_default(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	default:
@@ -3962,21 +3962,21 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			txgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			txgbe_rss_disable(dev);
@@ -3987,18 +3987,18 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4028,7 +4028,7 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			txgbe_vmdq_tx_hw_configure(hw);
 		else
 			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
@@ -4038,13 +4038,13 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_64;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_32;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_16;
 			break;
 		default:
@@ -4107,10 +4107,10 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4118,22 +4118,22 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
 				    "is disabled");
 		return -EINVAL;
 	}
 
 	rfctl = rd32(hw, TXGBE_PSRCTL);
-	if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~TXGBE_PSRCTL_RSCDIA;
 	else
 		rfctl |= TXGBE_PSRCTL_RSCDIA;
 	wr32(hw, TXGBE_PSRCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set PSRCTL.RSCACK bit */
@@ -4273,7 +4273,7 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 	}
 #endif
 }
@@ -4316,7 +4316,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
 	else
 		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4344,7 +4344,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4354,7 +4354,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -4391,11 +4391,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4410,7 +4410,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = rd32(hw, TXGBE_PSRCTL);
 	rxcsum |= TXGBE_PSRCTL_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= TXGBE_PSRCTL_L4CSUM;
 	else
 		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
@@ -4419,7 +4419,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	if (hw->mac.type == txgbe_mac_raptor) {
 		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4542,8 +4542,8 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 		txgbe_setup_loopback_link_raptor(hw);
 
 #ifdef RTE_LIB_SECURITY
-	if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
-	    (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
+	    (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = txgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -4851,7 +4851,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Set PSR type for VF RSS according to max Rx queue */
 	psrtype = TXGBE_VFPLCFG_PSRL4HDR |
@@ -4903,7 +4903,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		 */
 		wr32(hw, TXGBE_RXCFG(i), srrctl);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -4912,8 +4912,8 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/*
@@ -5084,7 +5084,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 		if (j == conf->conf.queue_num)
 			j = 0;
 		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
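
txgbe_set_rsc() above enforces two constraints that an application can
pre-check with the renamed flags: LRO must be advertised in
rx_offload_capa, and it cannot be combined with keeping the CRC. A
hedged application-side validation sketch (validate_rx_offloads() is
hypothetical):

#include <errno.h>
#include <rte_ethdev.h>

static int
validate_rx_offloads(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if ((requested & dev_info.rx_offload_capa) != requested)
		return -ENOTSUP;
	/* txgbe (like ixgbe) cannot do LRO while keeping the CRC */
	if ((requested & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
	    (requested & RTE_ETH_RX_OFFLOAD_KEEP_CRC))
		return -EINVAL;
	return 0;
}
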
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b96f58a3f848..27d4c842c0e7 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -309,7 +309,7 @@ struct txgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -392,7 +392,7 @@ struct txgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t            offloads; /* Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 3abe3959eb1a..3171be73d05d 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -118,14 +118,14 @@ txgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -364,10 +364,10 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -381,7 +381,7 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 86498365e149..17b6a1a1ceec 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -125,8 +125,8 @@ static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static struct rte_eth_link pmd_link = {
 		.link_speed = 10000,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN
 };
 
 struct rte_vhost_vring_state {
@@ -817,7 +817,7 @@ new_device(int vid)
 
 	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);
@@ -852,7 +852,7 @@ destroy_device(int vid)
 	rte_atomic32_set(&internal->dev_attached, 0);
 	update_queuing_status(eth_dev);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1118,7 +1118,7 @@ eth_dev_configure(struct rte_eth_dev *dev)
 	if (vhost_driver_setup(dev) < 0)
 		return -1;
 
-	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -1267,9 +1267,9 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = internal->max_queues;
 	dev_info->min_rx_bufsize = 0;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
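
The vhost PMD above publishes its static link with the renamed
RTE_ETH_LINK_* values; an application reads them back through the usual
query. A small sketch (print_link() is hypothetical):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	printf("port %u: %s, %u Mbps, %s-duplex\n", port_id,
	       link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
	       link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			"full" : "half");
}
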
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ddf0e26ab4db..94120b349023 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -712,7 +712,7 @@ int
 virtio_dev_close(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1774,7 +1774,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
-	if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+	if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
 		if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
 			config = &local_config;
 			virtio_read_dev_config(hw,
@@ -1788,7 +1788,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		}
 	}
 	if (hw->duplex == DUPLEX_UNKNOWN)
-		hw->duplex = ETH_LINK_FULL_DUPLEX;
+		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
 		hw->speed, hw->duplex);
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
@@ -1887,7 +1887,7 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	int vectorized = 0;
 	int ret;
 
@@ -1958,22 +1958,22 @@ static uint32_t
 virtio_dev_speed_capa_get(uint32_t speed)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
@@ -2089,14 +2089,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "configure");
 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Rx multi queue mode %d",
 			rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Tx multi queue mode %d",
 			txmode->mq_mode);
@@ -2114,20 +2114,20 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
@@ -2139,15 +2139,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
-	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
 		PMD_DRV_LOG(ERR,
 			"rx checksum not available on this host");
 		return -ENOTSUP;
 	}
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
 		PMD_DRV_LOG(ERR,
@@ -2159,12 +2159,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
 		virtio_dev_cq_start(dev);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		hw->vlan_strip = 1;
 
-	hw->rx_ol_scatter = (rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 		PMD_DRV_LOG(ERR,
 			    "vlan filtering not available on this host");
@@ -2217,7 +2217,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 				PMD_DRV_LOG(INFO,
 					"disabled packed ring vectorized rx for TCP_LRO enabled");
 				hw->use_vec_rx = 0;
@@ -2244,10 +2244,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_LRO |
-					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
+					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
 				PMD_DRV_LOG(INFO,
 					"disabled split ring vectorized rx for offloading enabled");
 				hw->use_vec_rx = 0;
@@ -2440,7 +2440,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct rte_eth_link link;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 	dev->data->dev_started = 0;
@@ -2481,28 +2481,28 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 	memset(&link, 0, sizeof(link));
 	link.link_duplex = hw->duplex;
 	link.link_speed  = hw->speed;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (!hw->started) {
-		link.link_status = ETH_LINK_DOWN;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
 		virtio_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
 				&status, sizeof(status));
 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
-			link.link_status = ETH_LINK_DOWN;
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			PMD_INIT_LOG(DEBUG, "Port %d is down",
 				     dev->data->port_id);
 		} else {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			PMD_INIT_LOG(DEBUG, "Port %d is up",
 				     dev->data->port_id);
 		}
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2515,8 +2515,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct virtio_hw *hw = dev->data->dev_private;
 	uint64_t offloads = rxmode->offloads;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 
 			PMD_DRV_LOG(NOTICE,
@@ -2526,8 +2526,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK)
-		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
+		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -2549,32 +2549,32 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = hw->max_mtu;
 
 	host_features = VIRTIO_OPS(hw)->get_features(hw);
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 	}
 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 	}
 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
 		/*
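
The renamed constants keep the two virtio speed namespaces clearly
apart: RTE_ETH_SPEED_NUM_* are plain Mbps values carried in
rte_eth_link.link_speed, while RTE_ETH_LINK_SPEED_* are
capability/request bits for dev_info.speed_capa and
rte_eth_conf.link_speeds, which is exactly what
virtio_dev_speed_capa_get() converts between. For example, a sketch of
requesting a fixed speed (whether a given PMD honours it is
device-specific):

#include <rte_ethdev.h>

/* Pin the port to 10G instead of autonegotiation. */
static const struct rte_eth_conf fixed_10g_conf = {
	.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G,
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
	.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
};
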
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index a19895af1f17..26d9edf5319c 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -41,20 +41,20 @@
 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
 
 #define VMXNET3_TX_OFFLOAD_CAP		\
-	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
-	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_TX_OFFLOAD_TCP_TSO |	\
-	 DEV_TX_OFFLOAD_MULTI_SEGS)
+	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
+	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define VMXNET3_RX_OFFLOAD_CAP		\
-	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
-	 DEV_RX_OFFLOAD_VLAN_FILTER |   \
-	 DEV_RX_OFFLOAD_SCATTER |	\
-	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_LRO |	\
-	 DEV_RX_OFFLOAD_RSS_HASH)
+	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |	\
+	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
+	 RTE_ETH_RX_OFFLOAD_SCATTER |	\
+	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_LRO |	\
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 int vmxnet3_segs_dynfield_offset = -1;
 
@@ -398,9 +398,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* set the initial link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(eth_dev, &link);
 
 	return 0;
@@ -486,8 +486,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
@@ -547,7 +547,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	hw->queueDescPA = mz->iova;
 	hw->queue_desc_len = (uint16_t)size;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Allocate memory structure for UPT1_RSSConf and configure */
 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
 				      "rss_conf", rte_socket_id(),
@@ -843,15 +843,15 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
 		devRead->misc.maxNumRxSG = 0;
 	}
 
-	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		ret = vmxnet3_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS)
 			return ret;
@@ -863,7 +863,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	}
 
 	ret = vmxnet3_dev_vlan_offload_set(dev,
-			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -930,7 +930,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 	}
 
 	if (VMXNET3_VERSION_GE_4(hw) &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Check for additional RSS  */
 		ret = vmxnet3_v4_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS) {
@@ -1039,9 +1039,9 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(dev, &link);
 
 	hw->adapter_stopped = 1;
@@ -1365,7 +1365,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
 	dev_info->min_mtu = VMXNET3_MIN_MTU;
 	dev_info->max_mtu = VMXNET3_MAX_MTU;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -1447,10 +1447,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (ret & 0x1)
-		link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+		link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1503,7 +1503,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1573,8 +1573,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1583,8 +1583,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 				       VMXNET3_CMD_UPDATE_FEATURE);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
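
vmxnet3_dev_vlan_offload_set() above takes the RTE_ETH_VLAN_*_MASK bits
that select which VLAN offloads a call touches. Through the public
rte_eth_dev_set_vlan_offload(), as I read the ethdev implementation,
the argument carries the desired on/off state of all four VLAN offloads
at once, so enabling stripping plus filtering (and thereby clearing
extend/QinQ stripping) is a one-liner (enable_vlan_strip_filter() is a
hypothetical wrapper):

#include <rte_ethdev.h>

/* Bits set enable the corresponding VLAN offload, clear bits disable it. */
static int
enable_vlan_strip_filter(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_offload(port_id,
			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
}
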
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 8950175460f0..ef858ac9512f 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -32,18 +32,18 @@
 				VMXNET3_MAX_RX_QUEUES + 1)
 
 #define VMXNET3_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 #define VMXNET3_V4_RSS_MASK ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define VMXNET3_MANDATORY_V4_RSS ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 /* RSS configuration structure - shared with device through GPA */
 typedef struct VMXNET3_RSSConf {
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index b01c4c01f9c9..870100fa4f11 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1326,13 +1326,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	rss_hf = port_rss_conf->rss_hf &
 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1389,13 +1389,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
 	/* loading hashType */
 	dev_rss_conf->hashType = 0;
 	rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
 	return VMXNET3_SUCCESS;
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index a26076b312e5..ecafc5e4f1a9 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -70,11 +70,11 @@ mbuf_input(struct rte_mbuf *mbuf)
 
 static const struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -327,7 +327,7 @@ check_port_link_status(uint16_t port_id)
 
 		if (link_get_err >= 0 && link.link_status) {
 			const char *dp = (link.link_duplex ==
-				ETH_LINK_FULL_DUPLEX) ?
+				RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex";
 			printf("\nPort %u Link Up - speed %s - %s\n",
 				port_id,
diff --git a/examples/bond/main.c b/examples/bond/main.c
index fd8fd767c811..1087b0dad125 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -114,17 +114,17 @@ static struct rte_mempool *mbuf_pool;
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -148,9 +148,9 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
@@ -240,9 +240,9 @@ bond_port_init(struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			BOND_PORT, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
 	if (retval != 0)
 		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
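
The same three-step port-init pattern recurs in most of the examples
touched below: query dev_info, add RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
only when advertised, and trim the requested rss_hf to what the device
supports. Condensed into one hypothetical helper, a sketch of the
shared pattern rather than code from any one example:

#include <rte_ethdev.h>

static int
tune_port_conf(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	conf->rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	return 0;
}
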
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 8c4a8feec0c2..c681e237ea46 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -80,15 +80,15 @@ struct app_stats prev_app_stats;
 
 static const struct rte_eth_conf port_conf_default = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		}
 	},
 };
@@ -126,9 +126,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 1bc675962bf3..cdd9e9b60bd8 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -98,7 +98,7 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
 	int ret;
 
 	memset(&cfg_port, 0, sizeof(cfg_port));
-	cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
+	cfg_port.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
 
 	for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
 		struct app_port *ptr_port = &app_cfg->ports[idx_port];
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 413251630709..e7cdf8d5775b 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -233,13 +233,13 @@ rte_ethtool_get_pauseparam(uint16_t port_id,
 	pause_param->tx_pause = 0;
 	pause_param->rx_pause = 0;
 	switch (fc_conf.mode) {
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		pause_param->rx_pause = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		pause_param->tx_pause = 1;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_param->rx_pause = 1;
 		pause_param->tx_pause = 1;
 	default:
@@ -277,14 +277,14 @@ rte_ethtool_set_pauseparam(uint16_t port_id,
 
 	if (pause_param->tx_pause) {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_FULL;
+			fc_conf.mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf.mode = RTE_FC_TX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
 	} else {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_RX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_RX_PAUSE;
 		else
-			fc_conf.mode = RTE_FC_NONE;
+			fc_conf.mode = RTE_ETH_FC_NONE;
 	}
 
 	status = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
@@ -398,12 +398,12 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
 	for (vf = 0; vf < num_vfs; vf++) {
 #ifdef RTE_NET_IXGBE
 		rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
-			ETH_VMDQ_ACCEPT_UNTAG, 0);
+			RTE_ETH_VMDQ_ACCEPT_UNTAG, 0);
 #endif
 	}
 
 	/* Enable Rx vlan filter, VF unspport status is discard */
-	ret = rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
+	ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
 	if (ret != 0)
 		return ret;
 
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index e26be8edf28f..193a16463449 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -283,13 +283,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -311,12 +311,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 476b147bdfcc..1b841d46ad93 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -614,13 +614,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -642,9 +642,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
 
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index 8a43f6ac0f92..6185b340600c 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -212,9 +212,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index dd8a33d036ee..bfc1949c8428 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -113,7 +113,7 @@ assert_link_status(void)
 	memset(&link, 0, sizeof(link));
 	do {
 		link_get_err = rte_eth_link_get(port_id, &link);
-		if (link_get_err == 0 && link.link_status == ETH_LINK_UP)
+		if (link_get_err == 0 && link.link_status == RTE_ETH_LINK_UP)
 			break;
 		rte_delay_ms(CHECK_INTERVAL);
 	} while (--rep_cnt);
@@ -121,7 +121,7 @@ assert_link_status(void)
 	if (link_get_err < 0)
 		rte_exit(EXIT_FAILURE, ":: error: link get is failing: %s\n",
 			 rte_strerror(-link_get_err));
-	if (link.link_status == ETH_LINK_DOWN)
+	if (link.link_status == RTE_ETH_LINK_DOWN)
 		rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
 }
 
@@ -138,12 +138,12 @@ init_port(void)
 		},
 		.txmode = {
 			.offloads =
-				DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO,
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO,
 		},
 	};
 	struct rte_eth_txconf txq_conf;
diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index ccfee585f850..b1aa2767a0af 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -819,12 +819,12 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	/* Configuring port to use RSS for multiple RX queues. 8< */
 	static const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_PROTO_MASK,
+				.rss_hf = RTE_ETH_RSS_PROTO_MASK,
 			}
 		}
 	};
@@ -852,9 +852,9 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device:"
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index d51133199c42..4ffe997baf23 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -148,13 +148,13 @@ static struct rte_eth_conf port_conf = {
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_SCATTER),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_SCATTER),
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -623,7 +623,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
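
The ip_fragmentation port_conf above pairs RTE_ETH_RX_OFFLOAD_SCATTER
with RTE_ETH_TX_OFFLOAD_MULTI_SEGS because frames larger than one
mbuf's data room travel as chains. A hypothetical helper for walking
such a chain:

#include <rte_mbuf.h>

/* Walk a scattered frame; for a well-formed chain this matches
 * nb_segs on the head mbuf. */
static unsigned int
count_segments(const struct rte_mbuf *m)
{
	unsigned int n = 0;

	while (m != NULL) {
		n++;
		m = m->next;
	}
	return n;
}
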
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
index 9ba02e687adb..0290767af473 100644
--- a/examples/ip_pipeline/link.c
+++ b/examples/ip_pipeline/link.c
@@ -45,7 +45,7 @@ link_next(struct link *link)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -57,12 +57,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -77,11 +77,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -139,7 +139,7 @@ link_create(const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -157,9 +157,9 @@ link_create(const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -267,5 +267,5 @@ link_is_up(const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
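
rss_setup() above programs the whole redirection table; the same group
layout also allows touching a single bucket, since each
rte_eth_rss_reta_entry64 covers RTE_ETH_RETA_GROUP_SIZE (64) table
entries and its mask selects which of them an update applies to. A
sketch (reta_set_one() is hypothetical):

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
reta_set_one(uint16_t port_id, uint16_t reta_size,
	uint16_t bucket, uint16_t queue)
{
	struct rte_eth_rss_reta_entry64
		conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];

	if (bucket >= reta_size || reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -EINVAL;
	memset(conf, 0, sizeof(conf));
	/* one bit in the 64-entry group's mask, one queue index */
	conf[bucket / RTE_ETH_RETA_GROUP_SIZE].mask =
		1ULL << (bucket % RTE_ETH_RETA_GROUP_SIZE);
	conf[bucket / RTE_ETH_RETA_GROUP_SIZE]
		.reta[bucket % RTE_ETH_RETA_GROUP_SIZE] = queue;
	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}
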
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 06dc42799314..41e35593867b 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -160,22 +160,22 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -737,7 +737,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1095,9 +1095,9 @@ main(int argc, char **argv)
 		n_tx_queue = nb_lcores;
 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index a10e330f5003..1c60ac28e317 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -233,19 +233,19 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1444,10 +1444,10 @@ print_usage(const char *prgname)
 		"               \"parallel\" : Parallel\n"
 		"  --" CMD_LINE_OPT_RX_OFFLOAD
 		": bitmask of the RX HW offload capabilities to enable/use\n"
-		"                         (DEV_RX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_TX_OFFLOAD
 		": bitmask of the TX HW offload capabilities to enable/use\n"
-		"                         (DEV_TX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
 		": max number of entries in reassemble(fragment) table\n"
 		"    (zero (default value) disables reassembly)\n"
@@ -1898,7 +1898,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2201,8 +2201,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 	local_port_conf.rxmode.mtu = mtu_size;
 
 	if (multi_seg_required()) {
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	local_port_conf.rxmode.offloads |= req_rx_offloads;
@@ -2225,12 +2225,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 			portid, local_port_conf.txmode.offloads,
 			dev_info.tx_offload_capa);
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	printf("port %u configurng rx_offloads=0x%" PRIx64
 		", tx_offloads=0x%" PRIx64 "\n",
@@ -2288,7 +2288,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 		/* Pre-populate pkt offloads based on capabilities */
 		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
 		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
-		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
 
 		tx_queueid++;
@@ -2649,7 +2649,7 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
 	struct rte_flow *flow;
 	int ret;
 
-	if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 17a28556c971..5cdd794f017f 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -986,7 +986,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	if (inbound) {
 		if ((dev_info.rx_offload_capa &
-				DEV_RX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware RX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -994,7 +994,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	} else { /* outbound */
 		if ((dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware TX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -1628,7 +1628,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 	}
 
 	/* Check for outbound rules that use offloads and use this port */
@@ -1639,7 +1639,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
 	}
 	return 0;
 }
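
check_eth_dev_caps() and sa_check_offloads() above gate the SECURITY offloads on what the port actually advertises. The probe pattern, as a minimal sketch assuming a port_conf in scope and with error handling elided:

	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return -EINVAL;
	/* Enable inline IPsec on Rx only if the PMD reports the capability. */
	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY)
		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;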
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 73391ce1a96d..bdcaa3bcd1ca 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -114,8 +114,8 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	},
 };
 
@@ -619,7 +619,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
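
check_all_ports_link_status(), repeated across these examples, polls each port until the link comes up or a timeout expires. The core of one polling pass, as a minimal sketch (the surrounding retry loop and timeout bookkeeping elided):

	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	if (rte_eth_link_get_nowait(portid, &link) < 0)
		continue;	/* query failed; skip this port */
	/* Clear all_ports_up if any polled port is still down. */
	if (link.link_status == RTE_ETH_LINK_DOWN)
		all_ports_up = 0;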
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 69a0afced6cc..d324ee224109 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -94,7 +94,7 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
 /* Options for configuring ethernet port */
 static struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -607,9 +607,9 @@ init_port(uint16_t port)
 			"Error during getting device (port %u) info: %s\n",
 			port, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
@@ -687,7 +687,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 6e2016752fca..04a3bdace20c 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -215,11 +215,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1807,7 +1807,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2631,9 +2631,9 @@ initialize_ports(struct l2fwd_crypto_options *options)
 			return retval;
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (retval < 0) {
 			printf("Cannot configure device: err=%d, port=%u\n",
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index 9040be5ed9b6..cf3d1b8aaf40 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -14,7 +14,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 			.split_hdr_size = 0,
 		},
 		.txmode = {
-			.mq_mode = ETH_MQ_TX_NONE,
+			.mq_mode = RTE_ETH_MQ_TX_NONE,
 		},
 	};
 	uint16_t nb_ports_available = 0;
@@ -22,9 +22,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 	int ret;
 
 	if (rsrc->event_mode) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-		port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+		port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	}
 
 	/* Initialise each port */
@@ -60,9 +60,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queue. 8< */
 		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 1db89f2bd139..9806204b81d1 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -395,7 +395,7 @@ check_all_ports_link_status(struct l2fwd_resources *rsrc,
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 62981663ea78..d8eabe4c869e 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -93,7 +93,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -725,7 +725,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -868,9 +868,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index af59d51b3ec4..78fc48f781fc 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -82,7 +82,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -477,7 +477,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -649,9 +649,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE,
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 8feb50e0f542..c9d8d4918a34 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -94,7 +94,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -605,7 +605,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -791,9 +791,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the number of queues for a port. */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 410ec94b4131..1fb180723582 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -123,19 +123,19 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1935,7 +1935,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2003,7 +2003,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -2087,9 +2087,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
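
config_port_max_pkt_len() above derives the MTU by subtracting the L2 overhead from the requested frame size and enables multi-segment Tx once the MTU exceeds the 1500-byte RTE_ETHER_MTU. Worked numbers, assuming a plain Ethernet header plus CRC:

	/* overhead = 14 (header) + 4 (CRC) = 18 bytes, so a 9018-byte frame
	 * yields mtu = 9000 > RTE_ETHER_MTU, which turns on
	 * RTE_ETH_TX_OFFLOAD_MULTI_SEGS for jumbo transmission. */
	uint32_t overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; /* 18 */
	uint32_t mtu = 9018 - overhead_len;                            /* 9000 */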
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 05385807e83e..7f00c65609ed 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,17 +111,17 @@ static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -607,7 +607,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* Clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -731,7 +731,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -828,9 +828,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 39624993b081..21c79567b1f7 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -249,18 +249,18 @@ uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_UDP,
+			.rss_hf = RTE_ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	}
 };
 
@@ -2196,7 +2196,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2509,7 +2509,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -2637,9 +2637,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 961860ea18ef..7c7613a83aad 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -75,9 +75,9 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
 			rte_panic("Error during getting device (port %u) info:"
 				  "%s\n", port_id, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+						RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 						dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 202ef78b6e95..5dd3e4136ea1 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -119,18 +119,18 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -902,7 +902,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -987,7 +987,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1052,15 +1052,15 @@ l3fwd_poll_resource_setup(void)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
 
 		if (dev_info.max_rx_queues == 1)
-			local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 
 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index ce8ae059d789..551f0524da79 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -82,7 +82,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.intr_conf = {
 		.lsc = 1, /**< lsc interrupt feature enabled */
@@ -146,7 +146,7 @@ print_stats(void)
 			   link_get_err < 0 ? "0" :
 			   rte_eth_link_speed_to_str(link.link_speed),
 			   link_get_err < 0 ? "Link get failed" :
-			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+			   (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex"),
 			   port_statistics[portid].tx,
 			   port_statistics[portid].rx,
@@ -506,7 +506,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -633,9 +633,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index be669c2bcc06..a4d7a3e5436a 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -93,7 +93,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS
+			.mq_mode = RTE_ETH_MQ_RX_RSS
 		}
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_clients;
@@ -212,7 +212,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index a66328ba0caf..b35886a77b00 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -175,18 +175,18 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 {
 	struct rte_eth_conf port_conf = {
 			.rxmode = {
-				.mq_mode	= ETH_MQ_RX_RSS,
+				.mq_mode	= RTE_ETH_MQ_RX_RSS,
 				.split_hdr_size = 0,
-				.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+				.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 			},
 			.rx_adv_conf = {
 				.rss_conf = {
 					.rss_key = NULL,
-					.rss_hf = ETH_RSS_IP,
+					.rss_hf = RTE_ETH_RSS_IP,
 				},
 			},
 			.txmode = {
-				.mq_mode = ETH_MQ_TX_NONE,
+				.mq_mode = RTE_ETH_MQ_TX_NONE,
 			}
 	};
 	const uint16_t rx_rings = num_queues, tx_rings = num_queues;
@@ -217,9 +217,9 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 
 	info.default_rxconf.rx_drop_en = 1;
 
-	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
@@ -391,7 +391,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
index e9a388710647..f110fc129f55 100644
--- a/examples/ntb/ntb_fwd.c
+++ b/examples/ntb/ntb_fwd.c
@@ -89,17 +89,17 @@ static uint16_t pkt_burst = NTB_DFLT_PKT_BURST;
 
 static struct rte_eth_conf eth_port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 4f6982bc1289..b01ac60fd196 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -294,9 +294,9 @@ configure_eth_port(uint16_t port_id)
 		return ret;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
 	if (ret != 0)
 		return ret;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 74e016e1d20d..3a6a33bda3b0 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -306,18 +306,18 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_TCP,
+			.rss_hf = RTE_ETH_RSS_TCP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -3437,7 +3437,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3490,7 +3490,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -3589,9 +3589,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 4f20dfc4be06..569207a79d62 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -133,7 +133,7 @@ mempool_find(struct obj *obj, const char *name)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -145,12 +145,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -165,11 +165,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -227,7 +227,7 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -245,9 +245,9 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -356,7 +356,7 @@ link_is_up(struct obj *obj, const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
 
 struct link *
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 229a277032cb..979d9eb9e9d0 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -193,14 +193,14 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Force full Tx path in the driver, required for IEEE1588 */
-	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
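
The RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE capability test recurs in nearly every example this patch touches. A hypothetical helper that could factor the pattern out (sketch only, not proposed by this patch):

	/* Set a Tx offload bit only when the device advertises support. */
	static void
	enable_tx_offload_if_supported(const struct rte_eth_dev_info *info,
				       struct rte_eth_conf *conf, uint64_t bit)
	{
		if (info->tx_offload_capa & bit)
			conf->txmode.offloads |= bit;
	}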
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index c32d2e12e633..743bae2da50a 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -51,18 +51,18 @@ static struct rte_mempool *pool = NULL;
  ***/
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -332,8 +332,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_rx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
@@ -378,8 +378,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_tx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 1367569c65db..9b34e4a76b1b 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -60,7 +60,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -105,9 +105,9 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE,
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 6845c396b8d9..1903d8b095a1 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -141,17 +141,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (hw_timestamping) {
-		if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
+		if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 			printf("\nERROR: Port %u does not support hardware timestamping\n"
 					, port);
 			return -1;
 		}
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
 		if (hwts_dynfield_offset < 0) {
 			printf("ERROR: Failed to register timestamp field\n");
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index a19934dbe0c8..0e5e3b5a9815 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -95,7 +95,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_nodes;
@@ -114,9 +114,9 @@ init_port(uint16_t port_num)
 	if (retval != 0)
 		return retval;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/*
 	 * Standard DPDK port initialisation - config port, then set up
@@ -276,7 +276,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index fd7207aee758..16435ee3ccc2 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -49,9 +49,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 97218917067e..44376417f83d 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -110,23 +110,23 @@ static int nb_sockets;
 /* empty vmdq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 		/*
 		 * VLAN strip is necessary for 1G NICs such as I350;
 		 * this fixes a bug where ipv4 forwarding in the guest cannot
 		 * forward packets from one virtio dev to another virtio dev.
 		 */
-		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
+		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM |
-			     DEV_TX_OFFLOAD_VLAN_INSERT |
-			     DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_TCP_TSO),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO),
 	},
 	.rx_adv_conf = {
 		/*
@@ -134,7 +134,7 @@ static struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -291,9 +291,9 @@ port_init(uint16_t port)
 		return -1;
 
 	rx_rings = (uint16_t)dev_info.max_rx_queues;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0) {
@@ -557,8 +557,8 @@ us_vhost_parse_args(int argc, char **argv)
 		case 'P':
 			promiscuous = 1;
 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
-				ETH_VMDQ_ACCEPT_BROADCAST |
-				ETH_VMDQ_ACCEPT_MULTICAST;
+				RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+				RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 			break;
 
 		case OPT_VM2VM_NUM:
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index e19d79a40802..b159291d77ce 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -73,9 +73,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -270,7 +270,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 		       /* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 85996bf864b7..feee642f594d 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -65,12 +65,12 @@ static uint8_t rss_enable;
 /* empty vmdq configuration structure. Filled in programmatically */
 static const struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		/*
@@ -78,7 +78,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -156,11 +156,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -258,9 +258,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
 	if (retval != 0)
 		return retval;
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index be0179fdeaf0..d2218f2cf741 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -59,8 +59,8 @@ static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports;
 
 /* number of pools (if the user does not specify any, 32 by default) */
-static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
-static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
+static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
+static enum rte_eth_nb_tcs   num_tcs   = RTE_ETH_4_TCS;
 static uint16_t num_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
 static uint8_t rss_enable;
@@ -68,11 +68,11 @@ static uint8_t rss_enable;
 /* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
 static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_DCB,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
+		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
 	},
 	/*
 	 * should be overridden separately in code with
@@ -80,7 +80,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	 */
 	.rx_adv_conf = {
 		.vmdq_dcb_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -88,12 +88,12 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 			.dcb_tc = {0},
 		},
 		.dcb_rx_conf = {
-				.nb_tcs = ETH_4_TCS,
+				.nb_tcs = RTE_ETH_4_TCS,
 				/** Traffic class each UP mapped to. */
 				.dcb_tc = {0},
 		},
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -102,7 +102,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	},
 	.tx_adv_conf = {
 		.vmdq_dcb_tx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.dcb_tc = {0},
 		},
 	},
@@ -156,7 +156,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 		conf.pool_map[i].pools = 1UL << i;
 		vmdq_conf.pool_map[i].pools = 1UL << i;
 	}
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		conf.dcb_tc[i] = i % num_tcs;
 		dcb_conf.dcb_tc[i] = i % num_tcs;
 		tx_conf.dcb_tc[i] = i % num_tcs;
@@ -172,11 +172,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
 			  sizeof(tx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -270,9 +270,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
@@ -381,9 +381,9 @@ vmdq_parse_num_pools(const char *q_arg)
 	if (n != 16 && n != 32)
 		return -1;
 	if (n == 16)
-		num_pools = ETH_16_POOLS;
+		num_pools = RTE_ETH_16_POOLS;
 	else
-		num_pools = ETH_32_POOLS;
+		num_pools = RTE_ETH_32_POOLS;
 
 	return 0;
 }
@@ -403,9 +403,9 @@ vmdq_parse_num_tcs(const char *q_arg)
 	if (n != 4 && n != 8)
 		return -1;
 	if (n == 4)
-		num_tcs = ETH_4_TCS;
+		num_tcs = RTE_ETH_4_TCS;
 	else
-		num_tcs = ETH_8_TCS;
+		num_tcs = RTE_ETH_8_TCS;
 
 	return 0;
 }
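
The two accepted shapes keep the VMDq+DCB queue budget constant: num_pools x num_tcs is 128 either way. As a quick check (assuming a port that exposes 128 VMDq queues, e.g. an ixgbe-class NIC):

	/* RTE_ETH_16_POOLS * RTE_ETH_8_TCS = 16 * 8 = 128 queues
	 * RTE_ETH_32_POOLS * RTE_ETH_4_TCS = 32 * 4 = 128 queues */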
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index b530ac6e320a..dcbffd4265fa 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -114,7 +114,7 @@ struct rte_eth_dev_data {
 	/** Device Ethernet link address. @see rte_eth_dev_release_port() */
 	struct rte_ether_addr *mac_addrs;
 	/** Bitmap associating MAC addresses to pools */
-	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+	uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
 	/**
 	 * Device Ethernet MAC addresses of hash filtering.
 	 * @see rte_eth_dev_release_port()
@@ -1700,23 +1700,23 @@ struct rte_eth_syn_filter {
 /**
  * filter type of tunneling packet
  */
-#define ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
-#define ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP Addr */
-#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
-#define ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
-#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
-#define ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
-
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN)
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
-					ETH_TUNNEL_FILTER_TENID | \
-					ETH_TUNNEL_FILTER_IMAC)
+#define RTE_ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP Addr */
+#define RTE_ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
+#define RTE_ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
+#define RTE_ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
+
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_IVLAN)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+						RTE_ETH_TUNNEL_FILTER_IVLAN | \
+						RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC (RTE_ETH_TUNNEL_FILTER_OMAC | \
+					       RTE_ETH_TUNNEL_FILTER_TENID | \
+					       RTE_ETH_TUNNEL_FILTER_IMAC)
 
 /**
  *  Select IPv4 or IPv6 for tunnel filters.
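
The composed filter types above are plain ORs of the single-field flags, e.g. inner MAC (0x08) | inner VLAN (0x10) = 0x18. A compile-time check of that arithmetic:

	_Static_assert((RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN)
		       == 0x18, "IMAC|IVLAN composes to 0x18");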
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 4ea5a657e003..9b6007803dd8 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -101,9 +101,6 @@ static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
 
 static const struct {
@@ -128,14 +125,14 @@ static const struct {
 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
-	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
 };
 
 #undef RTE_RX_OFFLOAD_BIT2STR
 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
 
 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_TX_OFFLOAD_##_name, #_name }
+	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
 
 static const struct {
 	uint64_t offload;
@@ -1182,32 +1179,32 @@ uint32_t
 rte_eth_speed_bitflag(uint32_t speed, int duplex)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
-		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
-	case ETH_SPEED_NUM_100M:
-		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
-	case ETH_SPEED_NUM_1G:
-		return ETH_LINK_SPEED_1G;
-	case ETH_SPEED_NUM_2_5G:
-		return ETH_LINK_SPEED_2_5G;
-	case ETH_SPEED_NUM_5G:
-		return ETH_LINK_SPEED_5G;
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10M:
+		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+	case RTE_ETH_SPEED_NUM_100M:
+		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+	case RTE_ETH_SPEED_NUM_1G:
+		return RTE_ETH_LINK_SPEED_1G;
+	case RTE_ETH_SPEED_NUM_2_5G:
+		return RTE_ETH_LINK_SPEED_2_5G;
+	case RTE_ETH_SPEED_NUM_5G:
+		return RTE_ETH_LINK_SPEED_5G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
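
Callers hand rte_eth_speed_bitflag() a numeric speed plus a duplex flag and get back the matching link-speed capability bit; only 10M and 100M have half-duplex variants. Usage sketch:

	/* Full-duplex 10G maps to the RTE_ETH_LINK_SPEED_10G capability bit. */
	uint32_t cap = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
					     RTE_ETH_LINK_FULL_DUPLEX);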
@@ -1528,7 +1525,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t max_rx_pktlen;
 		uint32_t overhead_len;
 
@@ -1585,12 +1582,12 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
-	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
-	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
 			port_id,
-			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -2213,7 +2210,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * size is supported by the configured device.
 	 */
 	/* Get the real Ethernet overhead length */
-	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t overhead_len;
 		uint32_t max_rx_pktlen;
 		int ret;
@@ -2793,21 +2790,21 @@ const char *
 rte_eth_link_speed_to_str(uint32_t link_speed)
 {
 	switch (link_speed) {
-	case ETH_SPEED_NUM_NONE: return "None";
-	case ETH_SPEED_NUM_10M:  return "10 Mbps";
-	case ETH_SPEED_NUM_100M: return "100 Mbps";
-	case ETH_SPEED_NUM_1G:   return "1 Gbps";
-	case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
-	case ETH_SPEED_NUM_5G:   return "5 Gbps";
-	case ETH_SPEED_NUM_10G:  return "10 Gbps";
-	case ETH_SPEED_NUM_20G:  return "20 Gbps";
-	case ETH_SPEED_NUM_25G:  return "25 Gbps";
-	case ETH_SPEED_NUM_40G:  return "40 Gbps";
-	case ETH_SPEED_NUM_50G:  return "50 Gbps";
-	case ETH_SPEED_NUM_56G:  return "56 Gbps";
-	case ETH_SPEED_NUM_100G: return "100 Gbps";
-	case ETH_SPEED_NUM_200G: return "200 Gbps";
-	case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+	case RTE_ETH_SPEED_NUM_NONE: return "None";
+	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
+	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
+	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
+	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
+	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
+	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
+	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
+	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
+	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
+	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
 	default: return "Invalid";
 	}
 }
@@ -2831,14 +2828,14 @@ rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
 		return -EINVAL;
 	}
 
-	if (eth_link->link_status == ETH_LINK_DOWN)
+	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
 		return snprintf(str, len, "Link down");
 	else
 		return snprintf(str, len, "Link up at %s %s %s",
 			rte_eth_link_speed_to_str(eth_link->link_speed),
-			(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			"FDX" : "HDX",
-			(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 			"Autoneg" : "Fixed");
 }
 
@@ -3745,7 +3742,7 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
 	dev = &rte_eth_devices[port_id];
 
 	if (!(dev->data->dev_conf.rxmode.offloads &
-	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
 			port_id);
 		return -ENOSYS;
@@ -3832,44 +3829,44 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	dev_offloads = orig_offloads;
 
 	/* check which option changed by application */
-	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
-		mask |= ETH_VLAN_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		mask |= RTE_ETH_VLAN_STRIP_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
-		mask |= ETH_VLAN_FILTER_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+		mask |= RTE_ETH_VLAN_FILTER_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
-		mask |= ETH_VLAN_EXTEND_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+		mask |= RTE_ETH_VLAN_EXTEND_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
-		mask |= ETH_QINQ_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+		mask |= RTE_ETH_QINQ_STRIP_MASK;
 	}
 
 	/*no change*/
@@ -3914,17 +3911,17 @@ rte_eth_dev_get_vlan_offload(uint16_t port_id)
 	dev = &rte_eth_devices[port_id];
 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		ret |= ETH_VLAN_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		ret |= ETH_VLAN_FILTER_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-		ret |= ETH_VLAN_EXTEND_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
-		ret |= ETH_QINQ_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
 
 	return ret;
 }
@@ -4001,7 +3998,7 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
 		return -EINVAL;
 	}
@@ -4019,7 +4016,7 @@ eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
 {
 	uint16_t i, num;
 
-	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
+	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
 	for (i = 0; i < num; i++) {
 		if (reta_conf[i].mask)
 			return 0;
@@ -4041,8 +4038,8 @@ eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
 			(reta_conf[idx].reta[shift] >= max_rxq)) {
 			RTE_ETHDEV_LOG(ERR,
@@ -4198,7 +4195,7 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4224,7 +4221,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4365,8 +4362,8 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 			port_id);
 		return -EINVAL;
 	}
-	if (pool >= ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", ETH_64_POOLS - 1);
+	if (pool >= RTE_ETH_64_POOLS) {
+		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
@@ -6275,7 +6272,7 @@ eth_dev_handle_port_link_status(const char *cmd __rte_unused,
 	rte_tel_data_add_dict_string(d, status_str, "UP");
 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
 	rte_tel_data_add_dict_string(d, "duplex",
-			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex");
 	return 0;
 }
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 21f570832921..1de810d5cdbf 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -250,7 +250,7 @@ void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
  * field is not supported, its value is 0.
  * All byte-related statistics do not include Ethernet FCS regardless
  * of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
  */
 struct rte_eth_stats {
 	uint64_t ipackets;  /**< Total number of successfully received packets. */
@@ -281,43 +281,75 @@ struct rte_eth_stats {
 /**@{@name Link speed capabilities
  * Device supported speeds bitmap flags
  */
-#define ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
-#define ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
-#define ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
-#define ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
-#define ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
-#define ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
-#define ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
-#define ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
-#define ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG     RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED       RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD      RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M         RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD     RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M        RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
+#define ETH_LINK_SPEED_1G          RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G        RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
+#define ETH_LINK_SPEED_5G          RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
+#define ETH_LINK_SPEED_10G         RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
+#define ETH_LINK_SPEED_20G         RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
+#define ETH_LINK_SPEED_25G         RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
+#define ETH_LINK_SPEED_40G         RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
+#define ETH_LINK_SPEED_50G         RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
+#define ETH_LINK_SPEED_56G         RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G        RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G        RTE_ETH_LINK_SPEED_200G
 /**@}*/
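+
+/*
+ * For example, an application that wants a fixed 10 Gbps link with
+ * autonegotiation disabled might set (in an illustrative struct
+ * rte_eth_conf 'conf'):
+ *   conf.link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_FIXED;
+ */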
 
 /**@{@name Link speed
  * Ethernet numeric link speeds in Mbps
  */
-#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
-#define ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
-#define ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
-#define ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
-#define ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
-#define ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
-#define ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
-#define ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
-#define ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
-#define ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
-#define ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE        RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
+#define ETH_SPEED_NUM_10M         RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M        RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
+#define ETH_SPEED_NUM_1G          RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G        RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
+#define ETH_SPEED_NUM_5G          RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
+#define ETH_SPEED_NUM_10G         RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
+#define ETH_SPEED_NUM_20G         RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
+#define ETH_SPEED_NUM_25G         RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
+#define ETH_SPEED_NUM_40G         RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
+#define ETH_SPEED_NUM_50G         RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
+#define ETH_SPEED_NUM_56G         RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G        RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G        RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN     RTE_ETH_SPEED_NUM_UNKNOWN
 /**@}*/
 
 /**
@@ -325,21 +357,27 @@ struct rte_eth_stats {
  */
 __extension__
 struct rte_eth_link {
-	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
-	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
-	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;        /**< RTE_ETH_SPEED_NUM_ */
+	uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
 
 /**@{@name Link negotiation
  * Constants used in link management.
  */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX     RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX     RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN            RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP              RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED           RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG         RTE_ETH_LINK_AUTONEG
 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
 /**@}*/
 
@@ -356,9 +394,12 @@ struct rte_eth_thresh {
 /**@{@name Multi-queue mode
  * @see rte_eth_conf.rxmode.mq_mode.
  */
-#define ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
-#define ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
-#define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define RTE_ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
+#define ETH_MQ_RX_RSS_FLAG      RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
+#define ETH_MQ_RX_DCB_FLAG      RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define ETH_MQ_RX_VMDQ_FLAG     RTE_ETH_MQ_RX_VMDQ_FLAG
 /**@}*/
 
 /**
@@ -367,50 +408,49 @@ struct rte_eth_thresh {
  */
 enum rte_eth_rx_mq_mode {
 	/** None of DCB, RSS or VMDq mode */
-	ETH_MQ_RX_NONE = 0,
+	RTE_ETH_MQ_RX_NONE = 0,
 
 	/** For Rx side, only RSS is on */
-	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
 	/** For Rx side,only DCB is on. */
-	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Both DCB and RSS enable */
-	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 
 	/** Only VMDq, no RSS nor DCB */
-	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** RSS mode with VMDq */
-	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** Use VMDq+DCB to route traffic to queues */
-	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Enable both VMDq and DCB in VMDq */
-	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
-				 ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+				 RTE_ETH_MQ_RX_VMDQ_FLAG,
 };
 
-/**
- * for Rx mq mode backward compatible
- */
-#define ETH_RSS                       ETH_MQ_RX_RSS
-#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX                    ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE		RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS		RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB		RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS	RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY	RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS	RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB	RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS	RTE_ETH_MQ_RX_VMDQ_DCB_RSS
 
 /**
  * A set of values to identify what method is to be used to transmit
  * packets using multi-TCs.
  */
 enum rte_eth_tx_mq_mode {
-	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
-	ETH_MQ_TX_DCB,          /**< For Tx side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For Tx side,both DCB and VT is on. */
-	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
+	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
+	RTE_ETH_MQ_TX_DCB,          /**< For Tx side, only DCB is on. */
+	RTE_ETH_MQ_TX_VMDQ_DCB,     /**< For Tx side, both DCB and VT are on. */
+	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
-
-/**
- * for Tx mq mode backward compatible
- */
-#define ETH_DCB_NONE                ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX                  ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE		RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB		RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB	RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY	RTE_ETH_MQ_TX_VMDQ_ONLY
 
 /**
  * A structure used to configure the Rx features of an Ethernet port.
@@ -423,7 +463,7 @@ struct rte_eth_rxmode {
 	uint32_t max_lro_pkt_size;
 	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
 	/**
-	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -438,12 +478,17 @@ struct rte_eth_rxmode {
  * Note that single VLAN is treated the same as inner VLAN.
  */
 enum rte_vlan_type {
-	ETH_VLAN_TYPE_UNKNOWN = 0,
-	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
-	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
-	ETH_VLAN_TYPE_MAX,
+	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+	RTE_ETH_VLAN_TYPE_MAX,
 };
 
+#define ETH_VLAN_TYPE_UNKNOWN	RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER	RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER	RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX	RTE_ETH_VLAN_TYPE_MAX
+
 /**
  * A structure used to describe a VLAN filter.
  * If the bit corresponding to a VID is set, such VID is on.
@@ -514,38 +559,70 @@ struct rte_eth_rss_conf {
  * Below macros are defined for RSS offload types, they can be used to
  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
  */
-#define ETH_RSS_IPV4               RTE_BIT64(2)
-#define ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
-#define ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
-#define ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
-#define ETH_RSS_IPV6               RTE_BIT64(8)
-#define ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
-#define ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
-#define ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
-#define ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
-#define ETH_RSS_IPV6_EX            RTE_BIT64(15)
-#define ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
-#define ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
-#define ETH_RSS_PORT               RTE_BIT64(18)
-#define ETH_RSS_VXLAN              RTE_BIT64(19)
-#define ETH_RSS_GENEVE             RTE_BIT64(20)
-#define ETH_RSS_NVGRE              RTE_BIT64(21)
-#define ETH_RSS_GTPU               RTE_BIT64(23)
-#define ETH_RSS_ETH                RTE_BIT64(24)
-#define ETH_RSS_S_VLAN             RTE_BIT64(25)
-#define ETH_RSS_C_VLAN             RTE_BIT64(26)
-#define ETH_RSS_ESP                RTE_BIT64(27)
-#define ETH_RSS_AH                 RTE_BIT64(28)
-#define ETH_RSS_L2TPV3             RTE_BIT64(29)
-#define ETH_RSS_PFCP               RTE_BIT64(30)
-#define ETH_RSS_PPPOE              RTE_BIT64(31)
-#define ETH_RSS_ECPRI              RTE_BIT64(32)
-#define ETH_RSS_MPLS               RTE_BIT64(33)
-#define ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
+#define RTE_ETH_RSS_IPV4               RTE_BIT64(2)
+#define ETH_RSS_IPV4                   RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
+#define ETH_RSS_FRAG_IPV4              RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
+#define ETH_RSS_NONFRAG_IPV4_TCP       RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
+#define ETH_RSS_NONFRAG_IPV4_UDP       RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP      RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER     RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6               RTE_BIT64(8)
+#define ETH_RSS_IPV6                   RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
+#define ETH_RSS_FRAG_IPV6              RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
+#define ETH_RSS_NONFRAG_IPV6_TCP       RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
+#define ETH_RSS_NONFRAG_IPV6_UDP       RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP      RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER     RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
+#define ETH_RSS_L2_PAYLOAD             RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX            RTE_BIT64(15)
+#define ETH_RSS_IPV6_EX                RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
+#define ETH_RSS_IPV6_TCP_EX            RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
+#define ETH_RSS_IPV6_UDP_EX            RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT               RTE_BIT64(18)
+#define ETH_RSS_PORT                   RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN              RTE_BIT64(19)
+#define ETH_RSS_VXLAN                  RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE             RTE_BIT64(20)
+#define ETH_RSS_GENEVE                 RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE              RTE_BIT64(21)
+#define ETH_RSS_NVGRE                  RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU               RTE_BIT64(23)
+#define ETH_RSS_GTPU                   RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH                RTE_BIT64(24)
+#define ETH_RSS_ETH                    RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN             RTE_BIT64(25)
+#define ETH_RSS_S_VLAN                 RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN             RTE_BIT64(26)
+#define ETH_RSS_C_VLAN                 RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP                RTE_BIT64(27)
+#define ETH_RSS_ESP                    RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH                 RTE_BIT64(28)
+#define ETH_RSS_AH                     RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3             RTE_BIT64(29)
+#define ETH_RSS_L2TPV3                 RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP               RTE_BIT64(30)
+#define ETH_RSS_PFCP                   RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE              RTE_BIT64(31)
+#define ETH_RSS_PPPOE                  RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI              RTE_BIT64(32)
+#define ETH_RSS_ECPRI                  RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS               RTE_BIT64(33)
+#define ETH_RSS_MPLS                   RTE_ETH_RSS_MPLS
+#define RTE_ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
+#define ETH_RSS_IPV4_CHKSUM            RTE_ETH_RSS_IPV4_CHKSUM
 
 /**
- * The ETH_RSS_L4_CHKSUM works on checksum field of any L4 header.
+ * The RTE_ETH_RSS_L4_CHKSUM works on the checksum field of any L4 header.
@@ -554,41 +631,48 @@ struct rte_eth_rss_conf {
  * checksum type for constructing the use of RSS offload bits.
  *
  * Due to above reason, some old APIs (and configuration) don't support
- * ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
+ * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
  *
  * For the case that checksum is not used in an UDP header,
  * it takes the reserved value 0 as input for the hash function.
  */
-#define ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
+#define RTE_ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
+#define ETH_RSS_L4_CHKSUM              RTE_ETH_RSS_L4_CHKSUM
 
 /*
- * We use the following macros to combine with above ETH_RSS_* for
+ * We use the following macros to combine with above RTE_ETH_RSS_* for
  * more specific input set selection. These bits are defined starting
  * from the high end of the 64 bits.
- * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
+ * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
  * the same level are used simultaneously, it is the same case as none of
  * them are added.
  */
-#define ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
-#define ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
-#define ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
-#define ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
-#define ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
-#define ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
+#define RTE_ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
+#define ETH_RSS_L3_SRC_ONLY            RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
+#define ETH_RSS_L3_DST_ONLY            RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
+#define ETH_RSS_L4_SRC_ONLY            RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
+#define ETH_RSS_L4_DST_ONLY            RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
+#define ETH_RSS_L2_SRC_ONLY            RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
+#define ETH_RSS_L2_DST_ONLY            RTE_ETH_RSS_L2_DST_ONLY
 
 /*
  * Only select IPV6 address prefix as RSS input set according to
- * https://tools.ietf.org/html/rfc6052
- * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
- * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
+ * https://tools.ietf.org/html/rfc6052
+ * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
  */
-#define RTE_ETH_RSS_L3_PRE32	   RTE_BIT64(57)
-#define RTE_ETH_RSS_L3_PRE40	   RTE_BIT64(56)
-#define RTE_ETH_RSS_L3_PRE48	   RTE_BIT64(55)
-#define RTE_ETH_RSS_L3_PRE56	   RTE_BIT64(54)
-#define RTE_ETH_RSS_L3_PRE64	   RTE_BIT64(53)
-#define RTE_ETH_RSS_L3_PRE96	   RTE_BIT64(52)
+#define RTE_ETH_RSS_L3_PRE32           RTE_BIT64(57)
+#define RTE_ETH_RSS_L3_PRE40           RTE_BIT64(56)
+#define RTE_ETH_RSS_L3_PRE48           RTE_BIT64(55)
+#define RTE_ETH_RSS_L3_PRE56           RTE_BIT64(54)
+#define RTE_ETH_RSS_L3_PRE64           RTE_BIT64(53)
+#define RTE_ETH_RSS_L3_PRE96           RTE_BIT64(52)
 
 /*
  * Use the following macros to combine with the above layers
@@ -603,22 +687,27 @@ struct rte_eth_rss_conf {
  * It basically stands for the innermost encapsulation level RSS
  * can be performed on according to PMD and device capabilities.
  */
-#define ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT  (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT      RTE_ETH_RSS_LEVEL_PMD_DEFAULT
 
 /**
  * level 1, requests RSS to be performed on the outermost packet
  * encapsulation level.
  */
-#define ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST    (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST        RTE_ETH_RSS_LEVEL_OUTERMOST
 
 /**
  * level 2, requests RSS to be performed on the specified inner packet
  * encapsulation level, from outermost to innermost (lower to higher values).
  */
-#define ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST    (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST        RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK         (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK             RTE_ETH_RSS_LEVEL_MASK
 
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf)          RTE_ETH_RSS_LEVEL(rss_hf)
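+
+/*
+ * For example, requesting RSS on the innermost encapsulation level of
+ * IP traffic:
+ *   uint64_t rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_LEVEL_INNERMOST;
+ * then RTE_ETH_RSS_LEVEL(rss_hf) evaluates to 2.
+ */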
 
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
@@ -633,219 +722,312 @@ struct rte_eth_rss_conf {
 static inline uint64_t
 rte_eth_rss_hf_refine(uint64_t rss_hf)
 {
-	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 
-	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 	return rss_hf;
 }
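+
+/*
+ * For example, if both RTE_ETH_RSS_L3_SRC_ONLY and RTE_ETH_RSS_L3_DST_ONLY
+ * are requested, the refine step clears them so hashing covers both the
+ * source and destination addresses:
+ *   rte_eth_rss_hf_refine(RTE_ETH_RSS_IPV4 |
+ *                         RTE_ETH_RSS_L3_SRC_ONLY |
+ *                         RTE_ETH_RSS_L3_DST_ONLY)
+ * returns RTE_ETH_RSS_IPV4.
+ */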
 
-#define ETH_RSS_IPV6_PRE32 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32	RTE_ETH_RSS_IPV6_PRE32
 
-#define ETH_RSS_IPV6_PRE40 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40	RTE_ETH_RSS_IPV6_PRE40
 
-#define ETH_RSS_IPV6_PRE48 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48	RTE_ETH_RSS_IPV6_PRE48
 
-#define ETH_RSS_IPV6_PRE56 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56	RTE_ETH_RSS_IPV6_PRE56
 
-#define ETH_RSS_IPV6_PRE64 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64	RTE_ETH_RSS_IPV6_PRE64
 
-#define ETH_RSS_IPV6_PRE96 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96	RTE_ETH_RSS_IPV6_PRE96
 
-#define ETH_RSS_IPV6_PRE32_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP	RTE_ETH_RSS_IPV6_PRE32_UDP
 
-#define ETH_RSS_IPV6_PRE40_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP	RTE_ETH_RSS_IPV6_PRE40_UDP
 
-#define ETH_RSS_IPV6_PRE48_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP	RTE_ETH_RSS_IPV6_PRE48_UDP
 
-#define ETH_RSS_IPV6_PRE56_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP	RTE_ETH_RSS_IPV6_PRE56_UDP
 
-#define ETH_RSS_IPV6_PRE64_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP	RTE_ETH_RSS_IPV6_PRE64_UDP
 
-#define ETH_RSS_IPV6_PRE96_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP	RTE_ETH_RSS_IPV6_PRE96_UDP
 
-#define ETH_RSS_IPV6_PRE32_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP	RTE_ETH_RSS_IPV6_PRE32_TCP
 
-#define ETH_RSS_IPV6_PRE40_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP	RTE_ETH_RSS_IPV6_PRE40_TCP
 
-#define ETH_RSS_IPV6_PRE48_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP	RTE_ETH_RSS_IPV6_PRE48_TCP
 
-#define ETH_RSS_IPV6_PRE56_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP	RTE_ETH_RSS_IPV6_PRE56_TCP
 
-#define ETH_RSS_IPV6_PRE64_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP	RTE_ETH_RSS_IPV6_PRE64_TCP
 
-#define ETH_RSS_IPV6_PRE96_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP	RTE_ETH_RSS_IPV6_PRE96_TCP
 
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP	RTE_ETH_RSS_IPV6_PRE32_SCTP
 
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP	RTE_ETH_RSS_IPV6_PRE40_SCTP
 
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP	RTE_ETH_RSS_IPV6_PRE48_SCTP
 
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP	RTE_ETH_RSS_IPV6_PRE56_SCTP
 
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP	RTE_ETH_RSS_IPV6_PRE64_SCTP
 
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
-	ETH_RSS_VXLAN  | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
-	ETH_RSS_S_VLAN  | \
-	ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP	RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP	RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP	RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP	RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP	RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+	RTE_ETH_RSS_VXLAN  | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL	RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+	RTE_ETH_RSS_S_VLAN  | \
+	RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN	RTE_ETH_RSS_VLAN
 
 /** Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX | \
-	ETH_RSS_PORT  | \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE | \
-	ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX | \
+	RTE_ETH_RSS_PORT  | \
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE | \
+	RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK	RTE_ETH_RSS_PROTO_MASK
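+
+/*
+ * For example, a typical configuration hashing all IP, TCP and UDP traffic
+ * might set (in an illustrative struct rte_eth_conf 'conf'):
+ *   conf.rx_adv_conf.rss_conf.rss_hf =
+ *       RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP;
+ */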
 
 /*
  * Definitions used for redirection table entry size.
  * Some RSS RETA sizes may not be supported by some drivers, check the
  * documentation or the description of relevant functions for more details.
  */
-#define ETH_RSS_RETA_SIZE_64  64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE   64
+#define RTE_ETH_RSS_RETA_SIZE_64  64
+#define ETH_RSS_RETA_SIZE_64      RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128     RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256     RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512     RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE   64
+#define RTE_RETA_GROUP_SIZE       RTE_ETH_RETA_GROUP_SIZE
 
 /**@{@name VMDq and DCB maximums */
-#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDq VLAN filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
-#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDq VLAN filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS       RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES     RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES         RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES              RTE_ETH_DCB_NUM_QUEUES
 /**@}*/
 
 /**@{@name DCB capabilities */
-#define ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group (ETS) support. */
+#define ETH_DCB_PG_SUPPORT          RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT         RTE_ETH_DCB_PFC_SUPPORT
 /**@}*/
 
 /**@{@name VLAN offload bits */
-#define ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
-
-#define ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
-#define ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
-#define ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
-#define ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
-#define ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD       RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD      RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD      RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD       RTE_ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK      0x0001 /**< VLAN Strip  setting mask */
+#define ETH_VLAN_STRIP_MASK          RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK     0x0002 /**< VLAN Filter  setting mask*/
+#define ETH_VLAN_FILTER_MASK         RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK     0x0004 /**< VLAN Extend  setting mask*/
+#define ETH_VLAN_EXTEND_MASK         RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK      0x0008 /**< QINQ Strip  setting mask */
+#define ETH_QINQ_STRIP_MASK          RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX          0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define ETH_VLAN_ID_MAX              RTE_ETH_VLAN_ID_MAX
 /**@}*/
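+
+/*
+ * For example, enabling VLAN stripping and filtering (and disabling the
+ * other VLAN offloads) on an illustrative 'port_id':
+ *   rte_eth_dev_set_vlan_offload(port_id,
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD);
+ */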
 
 /* Definitions used for receive MAC address   */
-#define ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128 /**< Maximum nb. of receive MAC addresses. */
+#define ETH_NUM_RECEIVE_MAC_ADDR       RTE_ETH_NUM_RECEIVE_MAC_ADDR
 
 /* Definitions used for unicast hash  */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY     RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
 
 /**@{@name VMDq Rx mode
  * @see rte_eth_vmdq_rx_conf.rx_mode
  */
-#define ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG      0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG          RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC    0x0002 /**< accept packets in multicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_MC        RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC    0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC        RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST  0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST      RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST  0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST      RTE_ETH_VMDQ_ACCEPT_MULTICAST
 /**@}*/
 
+/** Maximum nb. of VLANs per mirror rule */
+#define RTE_ETH_MIRROR_MAX_VLANS       64
+#define ETH_MIRROR_MAX_VLANS           RTE_ETH_MIRROR_MAX_VLANS
+
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP    0x01  /**< Virtual Pool uplink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_UP        RTE_ETH_MIRROR_VIRTUAL_POOL_UP
+#define RTE_ETH_MIRROR_UPLINK_PORT        0x02  /**< Uplink Port Mirroring. */
+#define ETH_MIRROR_UPLINK_PORT            RTE_ETH_MIRROR_UPLINK_PORT
+#define RTE_ETH_MIRROR_DOWNLINK_PORT      0x04  /**< Downlink Port Mirroring. */
+#define ETH_MIRROR_DOWNLINK_PORT          RTE_ETH_MIRROR_DOWNLINK_PORT
+#define RTE_ETH_MIRROR_VLAN               0x08  /**< VLAN Mirroring. */
+#define ETH_MIRROR_VLAN                   RTE_ETH_MIRROR_VLAN
+#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN  0x10  /**< Virtual Pool downlink Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_DOWN      RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
+
+/**
+ * A structure used to configure VLAN traffic mirror of an Ethernet port.
+ */
+struct rte_eth_vlan_mirror {
+	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
+	/** VLAN ID list for VLAN mirroring. */
+	uint16_t vlan_id[RTE_ETH_MIRROR_MAX_VLANS];
+};
+
+/**
+ * A structure used to configure traffic mirror of an Ethernet port.
+ */
+struct rte_eth_mirror_conf {
+	uint8_t rule_type;  /**< Mirroring rule type */
+	uint8_t dst_pool;   /**< Destination pool for this mirror rule. */
+	uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
+	/** VLAN ID setting for VLAN mirroring. */
+	struct rte_eth_vlan_mirror vlan;
+};
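+
+/*
+ * For example, a rule mirroring VLAN 100 to pool 1 might be described by
+ * the following illustrative initializer (vlan_mask bit i marks vlan_id[i]
+ * as valid):
+ *   struct rte_eth_mirror_conf conf = {
+ *       .rule_type = RTE_ETH_MIRROR_VLAN,
+ *       .dst_pool = 1,
+ *       .vlan = { .vlan_mask = RTE_BIT64(0), .vlan_id = { 100 } },
+ *   };
+ */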
+
 /**
  * A structure used to configure 64 entries of Redirection Table of the
  * Receive Side Scaling (RSS) feature of an Ethernet port. To configure
@@ -856,7 +1038,7 @@ struct rte_eth_rss_reta_entry64 {
 	/** Mask bits indicate which entries need to be updated/queried. */
 	uint64_t mask;
 	/** Group of 64 redirection table entries. */
-	uint16_t reta[RTE_RETA_GROUP_SIZE];
+	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
 };
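+
+/*
+ * For example, directing RETA entry 'i' to queue 3 in an illustrative
+ * 'reta_conf' array:
+ *   reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
+ *       RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
+ *   reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] = 3;
+ */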
 
 /**
@@ -864,38 +1046,44 @@ struct rte_eth_rss_reta_entry64 {
  * in DCB configurations
  */
 enum rte_eth_nb_tcs {
-	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
-	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
+	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
 };
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
 
 /**
  * This enum indicates the possible number of queue pools
  * in VMDq configurations.
  */
 enum rte_eth_nb_pools {
-	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
-	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
-	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
-	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
+	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
+	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
+	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
+	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
 };
+#define ETH_8_POOLS	RTE_ETH_8_POOLS
+#define ETH_16_POOLS	RTE_ETH_16_POOLS
+#define ETH_32_POOLS	RTE_ETH_32_POOLS
+#define ETH_64_POOLS	RTE_ETH_64_POOLS
 
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_dcb_tx_conf {
 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_dcb_tx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_tx_conf {
@@ -921,9 +1109,9 @@ struct rte_eth_vmdq_dcb_conf {
 	struct {
 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
 	/** Selects a queue in a pool */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 /**
@@ -933,7 +1121,7 @@ struct rte_eth_vmdq_dcb_conf {
  * Using this feature, packets are routed to a pool of queues. By default,
  * the pool selection is based on the MAC address, the VLAN ID in the
  * VLAN tag as specified in the pool_map array.
- * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
  * selection using only the MAC address. MAC address to pool mapping is done
  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
  * corresponding to the pool ID.
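+ *
+ * For example, steering an illustrative MAC address 'addr' to pool 2 so
+ * that its traffic is received on that pool's queues:
+ *   rte_eth_dev_mac_addr_add(port_id, &addr, 2);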
@@ -954,7 +1142,7 @@ struct rte_eth_vmdq_rx_conf {
 	struct {
 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
 };
 
 /**
@@ -963,7 +1151,7 @@ struct rte_eth_vmdq_rx_conf {
 struct rte_eth_txmode {
 	enum rte_eth_tx_mq_mode mq_mode; /**< Tx multi-queues mode. */
 	/**
-	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -1055,7 +1243,7 @@ struct rte_eth_rxconf {
 	uint16_t share_group;
 	uint16_t share_qid; /**< Shared Rx queue ID in group */
 	/**
-	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1084,7 +1272,7 @@ struct rte_eth_txconf {
 
 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	/**
-	 * Per-queue Tx offloads to be set  using DEV_TX_OFFLOAD_* flags.
+	 * Per-queue Tx offloads to be set  using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1195,12 +1383,17 @@ struct rte_eth_desc_lim {
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
-	RTE_FC_NONE = 0, /**< Disable flow control. */
-	RTE_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
-	RTE_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
-	RTE_FC_FULL      /**< Enable flow control on both side. */
+	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+	RTE_ETH_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
+	RTE_ETH_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
+	RTE_ETH_FC_FULL      /**< Enable flow control on both side. */
 };
 
+#define RTE_FC_NONE	RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE	RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE	RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL	RTE_ETH_FC_FULL
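+
+/*
+ * For example, enabling flow control in both directions on an illustrative
+ * 'port_id' via an rte_eth_fc_conf 'fc_conf':
+ *   fc_conf.mode = RTE_ETH_FC_FULL;
+ *   rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
+ */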
+
 /**
  * A structure used to configure Ethernet flow control parameter.
  * These parameters will be configured into the register of the NIC.
@@ -1231,18 +1424,29 @@ struct rte_eth_pfc_conf {
  * @see rte_eth_udp_tunnel
  */
 enum rte_eth_tunnel_type {
-	RTE_TUNNEL_TYPE_NONE = 0,
-	RTE_TUNNEL_TYPE_VXLAN,
-	RTE_TUNNEL_TYPE_GENEVE,
-	RTE_TUNNEL_TYPE_TEREDO,
-	RTE_TUNNEL_TYPE_NVGRE,
-	RTE_TUNNEL_TYPE_IP_IN_GRE,
-	RTE_L2_TUNNEL_TYPE_E_TAG,
-	RTE_TUNNEL_TYPE_VXLAN_GPE,
-	RTE_TUNNEL_TYPE_ECPRI,
-	RTE_TUNNEL_TYPE_MAX,
+	RTE_ETH_TUNNEL_TYPE_NONE = 0,
+	RTE_ETH_TUNNEL_TYPE_VXLAN,
+	RTE_ETH_TUNNEL_TYPE_GENEVE,
+	RTE_ETH_TUNNEL_TYPE_TEREDO,
+	RTE_ETH_TUNNEL_TYPE_NVGRE,
+	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_ETH_TUNNEL_TYPE_ECPRI,
+	RTE_ETH_TUNNEL_TYPE_MAX,
 };
 
+#define RTE_TUNNEL_TYPE_NONE		RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN		RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE		RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO		RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE		RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG	RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI		RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX		RTE_ETH_TUNNEL_TYPE_MAX
+
 /* Deprecated API file for rte_eth_dev_filter_* functions */
 #include "rte_eth_ctrl.h"
 
@@ -1250,11 +1454,16 @@ enum rte_eth_tunnel_type {
  *  Memory space that can be configured to store Flow Director filters
  *  in the board memory.
  */
-enum rte_fdir_pballoc_type {
-	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
-	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
-	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+	RTE_ETH_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+	RTE_ETH_FDIR_PBALLOC_128K,     /**< 128k. */
+	RTE_ETH_FDIR_PBALLOC_256K,     /**< 256k. */
 };
+#define rte_fdir_pballoc_type	rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K	RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K	RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K	RTE_ETH_FDIR_PBALLOC_256K
 
 /**
  *  Select report mode of FDIR hash information in Rx descriptors.
@@ -1271,9 +1480,9 @@ enum rte_fdir_status_mode {
  *
  * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
  */
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
 	enum rte_fdir_mode mode; /**< Flow Director mode. */
-	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+	enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
 	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
 	/** Rx queue of packets matching a "drop" filter in perfect mode. */
 	uint8_t drop_queue;
@@ -1282,6 +1491,8 @@ struct rte_fdir_conf {
 	struct rte_eth_fdir_flex_conf flex_conf;
 };
 
+#define rte_fdir_conf rte_eth_fdir_conf
+
 /**
  * UDP tunneling configuration.
  *
@@ -1299,7 +1510,7 @@ struct rte_eth_udp_tunnel {
 /**
  * A structure used to enable/disable specific device interrupts.
  */
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint32_t lsc:1;
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
@@ -1308,18 +1519,20 @@ struct rte_intr_conf {
 	uint32_t rmv:1;
 };
 
+#define rte_intr_conf rte_eth_intr_conf
+
 /**
  * A structure used to configure an Ethernet port.
  * Depending upon the Rx multi-queue mode, extra advanced
  * configuration settings may be needed.
  */
 struct rte_eth_conf {
-	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
-				used. ETH_LINK_SPEED_FIXED disables link
+	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
+				used. RTE_ETH_LINK_SPEED_FIXED disables link
 				autonegotiation, and a unique speed shall be
 				set. Otherwise, the bitmap defines the set of
 				speeds to be advertised. If the special value
-				ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
+				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
 				supported are advertised. */
 	struct rte_eth_rxmode rxmode; /**< Port Rx configuration. */
 	struct rte_eth_txmode txmode; /**< Port Tx configuration. */
@@ -1346,47 +1559,67 @@ struct rte_eth_conf {
 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
 	} tx_adv_conf; /**< Port Tx DCB configuration (union). */
 	/** Currently,Priority Flow Control(PFC) are supported,if DCB with PFC
-	    is needed,and the variable must be set ETH_DCB_PFC_SUPPORT. */
+	    is needed,and the variable must be set RTE_ETH_DCB_PFC_SUPPORT. */
 	uint32_t dcb_capability_en;
-	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
-	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+	struct rte_eth_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
+	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
 };
 
 /**
  * Rx offload capabilities of a device.
  */
-#define DEV_RX_OFFLOAD_VLAN_STRIP  0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT	0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER	0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND	0x00000400
-#define DEV_RX_OFFLOAD_SCATTER		0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP           RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM           RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM            RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM            RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO          0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO              RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP           RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP         RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT     0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT         RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER          RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND          RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER          0x00002000
+#define DEV_RX_OFFLOAD_SCATTER              RTE_ETH_RX_OFFLOAD_SCATTER
 /**
  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_RX_OFFLOAD_TIMESTAMP	0x00004000
-#define DEV_RX_OFFLOAD_SECURITY         0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC		0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM	0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH		0x00080000
-#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
-
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				 DEV_RX_OFFLOAD_UDP_CKSUM | \
-				 DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			     DEV_RX_OFFLOAD_VLAN_FILTER | \
-			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-			     DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP        0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP            RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY         0x00008000
+#define DEV_RX_OFFLOAD_SECURITY             RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC         0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC             RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM           RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM      RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH         0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH             RTE_ETH_RX_OFFLOAD_RSS_HASH
+#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT     0x00100000
+
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM	RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN	RTE_ETH_RX_OFFLOAD_VLAN
 
 /*
  * If new Rx offload capabilities are defined, they also must be
@@ -1396,54 +1629,76 @@ struct rte_eth_conf {
 /**
  * Tx offload capabilities of a device.
  */
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM  0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO     0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO     0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT          RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM           RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM            RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM            RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM           RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO          0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO              RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO          0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO              RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT          RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO        RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO          RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO         RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT        RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
 /**
  * Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
  * Tx queue without SW lock.
  */
-#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE          RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
 /** Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS           RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 /**
  * Device supports optimization for fast release of mbufs.
  * When set application must guarantee that per-queue all mbufs comes from
  * the same mempool and has refcnt = 1.
  */
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
-#define DEV_TX_OFFLOAD_SECURITY         0x00020000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY         0x00020000
+#define DEV_TX_OFFLOAD_SECURITY             RTE_ETH_TX_OFFLOAD_SECURITY
 /**
  * Device supports generic UDP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO          RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
 /**
  * Device supports generic IP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO           RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
 /** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM      RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
 /**
  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP     RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
@@ -1493,7 +1748,7 @@ struct rte_eth_dev_portconf {
  * Default values for switch domain ID when ethdev does not support switch
  * domain definitions.
  */
-#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID	(UINT16_MAX)
+#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID   (UINT16_MAX)
 
 /**
  * Ethernet device associated switch information
@@ -1591,7 +1846,7 @@ struct rte_eth_dev_info {
 	uint16_t vmdq_pool_base;  /**< First ID of VMDq pools. */
 	struct rte_eth_desc_lim rx_desc_lim;  /**< Rx descriptors limits */
 	struct rte_eth_desc_lim tx_desc_lim;  /**< Tx descriptors limits */
-	uint32_t speed_capa;  /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 	/** Configured number of Rx/Tx queues */
 	uint16_t nb_rx_queues; /**< Number of Rx queues. */
 	uint16_t nb_tx_queues; /**< Number of Tx queues. */
@@ -1695,8 +1950,10 @@ struct rte_eth_xstat_name {
 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
 };
 
-#define ETH_DCB_NUM_TCS    8
-#define ETH_MAX_VMDQ_POOL  64
+#define RTE_ETH_DCB_NUM_TCS    8
+#define ETH_DCB_NUM_TCS        RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL  64
+#define ETH_MAX_VMDQ_POOL      RTE_ETH_MAX_VMDQ_POOL
 
 /**
  * A structure used to get the information of queue and
@@ -1707,12 +1964,12 @@ struct rte_eth_dcb_tc_queue_mapping {
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 	/** Rx queues assigned to tc per Pool */
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 };
 
 /**
@@ -1721,8 +1978,8 @@ struct rte_eth_dcb_tc_queue_mapping {
  */
 struct rte_eth_dcb_info {
 	uint8_t nb_tcs;        /**< number of TCs */
-	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
-	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
+	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
+	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
 	/** Rx queues assigned to tc */
 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
 };
@@ -1746,7 +2003,7 @@ enum rte_eth_fec_mode {
 
 /* A structure used to get capabilities per link speed */
 struct rte_eth_fec_capa {
-	uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
+	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
 	uint32_t capa;  /**< FEC capabilities bitmask */
 };
 
@@ -1769,13 +2026,17 @@ struct rte_eth_fec_capa {
 
 /**@{@name L2 tunnel configuration */
 /** L2 tunnel enable mask */
-#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define RTE_ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
+#define ETH_L2_TUNNEL_ENABLE_MASK           RTE_ETH_L2_TUNNEL_ENABLE_MASK
 /** L2 tunnel insertion mask */
-#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define RTE_ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
+#define ETH_L2_TUNNEL_INSERTION_MASK        RTE_ETH_L2_TUNNEL_INSERTION_MASK
 /** L2 tunnel stripping mask */
-#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define RTE_ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
+#define ETH_L2_TUNNEL_STRIPPING_MASK        RTE_ETH_L2_TUNNEL_STRIPPING_MASK
 /** L2 tunnel forwarding mask */
-#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define RTE_ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
+#define ETH_L2_TUNNEL_FORWARDING_MASK       RTE_ETH_L2_TUNNEL_FORWARDING_MASK
 /**@}*/
 
 /**
@@ -2086,14 +2347,14 @@ uint16_t rte_eth_dev_count_total(void);
  * @param speed
  *   Numerical speed value in Mbps
  * @param duplex
- *   ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
+ *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
  * @return
  *   0 if the speed cannot be mapped
  */
 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 
 /**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2103,7 +2364,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
 
 /**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2211,7 +2472,7 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
- *   the DEV_RX_OFFLOAD_* flags.
+ *   the RTE_ETH_RX_OFFLOAD_* flags.
  *   If an offloading set in rx_conf->offloads
  *   hasn't been set in the input argument eth_conf->rxmode.offloads
  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
@@ -2788,7 +3049,7 @@ const char *rte_eth_link_speed_to_str(uint32_t link_speed);
  *
  * @param str
  *   A pointer to a string to be filled with textual representation of
- *   device status. At least ETH_LINK_MAX_STR_LEN bytes should be allocated to
+ *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
  *   store default link status text.
  * @param len
  *   Length of available memory at 'str' string.
@@ -3334,10 +3595,10 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
  *   The port identifier of the Ethernet device.
  * @param offload_mask
  *   The VLAN Offload bit mask can be mixed use with "OR"
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  * @return
  *   - (0) if successful.
  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
@@ -3353,10 +3614,10 @@ int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
  *   The port identifier of the Ethernet device.
  * @return
  *   - (>0) if successful. Bit mask to indicate
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  *   - (-ENODEV) if *port_id* invalid.
  */
 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
@@ -5382,7 +5643,7 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
  * of those packets whose transmission was effectively completed.
  *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
  * invoke this function concurrently on the same Tx queue without SW lock.
  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
  *
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index db3392bf9759..59d9d9eeb63f 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -2957,7 +2957,7 @@ struct rte_flow_action_rss {
 	 * through.
 	 */
 	uint32_t level;
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len; /**< Hash key length in bytes. */
 	uint32_t queue_num; /**< Number of entries in @p queue. */
 	const uint8_t *key; /**< Hash key. */
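
For reference, a minimal sketch of an RSS action built with the
renamed RTE_ETH_RSS_* hash-type bits (key and queue list are left to
the PMD defaults here):

#include <rte_flow.h>

/* Sketch: hash on IP and TCP headers using the renamed bits;
 * unset fields fall back to PMD defaults. */
static const struct rte_flow_action_rss rss_conf = {
	.level = 0, /* hash on the outermost encapsulation level */
	.types = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
	.key_len = 0,
	.queue_num = 0,
	.key = NULL,
	.queue = NULL,
};
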
diff --git a/lib/gso/rte_gso.c b/lib/gso/rte_gso.c
index 0d02ec3cee05..119fdcac0b7f 100644
--- a/lib/gso/rte_gso.c
+++ b/lib/gso/rte_gso.c
@@ -15,13 +15,13 @@
 #include "gso_udp4.h"
 
 #define ILLEGAL_UDP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+	((((ctx)->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0) || \
 	 (ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
 
 #define ILLEGAL_TCP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+	((((ctx)->gso_types & (RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
 		(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
 
 int
@@ -54,28 +54,28 @@ rte_gso_segment(struct rte_mbuf *pkt,
 	ol_flags = pkt->ol_flags;
 
 	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
 			((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
-			 (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+			 (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_tunnel_udp4_segment(pkt, gso_size,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_UDP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_udp4_segment(pkt, gso_size, direct_pool,
 				indirect_pool, pkts_out, nb_pkts_out);
diff --git a/lib/gso/rte_gso.h b/lib/gso/rte_gso.h
index d93ee8e5b171..0a65afc11e64 100644
--- a/lib/gso/rte_gso.h
+++ b/lib/gso/rte_gso.h
@@ -52,11 +52,11 @@ struct rte_gso_ctx {
 	uint32_t gso_types;
 	/**< the bit mask of required GSO types. The GSO library
 	 * uses the same macros as that of describing device TX
-	 * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+	 * offloading capabilities (i.e. RTE_ETH_TX_OFFLOAD_*_TSO) for
 	 * gso_types.
 	 *
 	 * For example, if applications want to segment TCP/IPv4
-	 * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+	 * packets, set RTE_ETH_TX_OFFLOAD_TCP_TSO in gso_types.
 	 */
 	uint16_t gso_size;
 	/**< maximum size of an output GSO segment, including packet
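
A minimal sketch of a GSO context expressed with the renamed flags
(the two mempools are assumed to exist; their names are illustrative):

#include <rte_gso.h>
#include <rte_mempool.h>

/* Sketch: segment plain TCP/IPv4 and VXLAN-encapsulated TCP/IPv4,
 * expressing gso_types with the renamed RTE_ETH_TX_OFFLOAD_*_TSO
 * bits. */
static void
init_gso_ctx(struct rte_gso_ctx *ctx, struct rte_mempool *direct_pool,
		struct rte_mempool *indirect_pool)
{
	ctx->direct_pool = direct_pool;
	ctx->indirect_pool = indirect_pool;
	ctx->flag = 0;
	ctx->gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO |
			 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
	ctx->gso_size = 1400; /* must be >= RTE_GSO_SEG_SIZE_MIN */
}
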
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index fdaaaf67f2f3..57e871201816 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -185,7 +185,7 @@ extern "C" {
  * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
  * HW capability, At minimum, the PMD should support
  * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
- * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ * if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
  */
 #define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))
 
@@ -208,7 +208,7 @@ extern "C" {
  * a) Fill outer_l2_len and outer_l3_len in mbuf.
  * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
  * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
- * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * 2) Configure RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
  */
 #define PKT_TX_OUTER_UDP_CKSUM     (1ULL << 41)
 
@@ -254,7 +254,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
  * or PKT_TX_TUNNEL_IPIP if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
@@ -267,7 +267,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
  * if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
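
As an illustration of the PKT_TX_OUTER_UDP_CKSUM steps quoted further
above, a minimal sketch preparing an mbuf with plain Ethernet/IPv4
outer headers (lengths are assumptions for this example):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Sketch: steps a) to c) for outer UDP checksum offload; the port
 * must have been configured with RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM. */
static void
request_outer_udp_cksum(struct rte_mbuf *m)
{
	m->outer_l2_len = sizeof(struct rte_ether_hdr); /* a) */
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);  /* a) */
	m->ol_flags |= PKT_TX_OUTER_UDP_CKSUM;          /* b) */
	m->ol_flags |= PKT_TX_OUTER_IPV4;               /* c) */
}
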
diff --git a/lib/mbuf/rte_mbuf_dyn.h b/lib/mbuf/rte_mbuf_dyn.h
index fb03cf1dcf90..29abe8da53cf 100644
--- a/lib/mbuf/rte_mbuf_dyn.h
+++ b/lib/mbuf/rte_mbuf_dyn.h
@@ -37,7 +37,7 @@
  *   of the dynamic field to be registered:
  *   const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... };
  * - The application initializes the PMD, and asks for this feature
- *   at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in
+ *   at port initialization by passing RTE_ETH_RX_OFFLOAD_MY_FEATURE in
  *   rxconf. This will make the PMD to register the field by calling
  *   rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD
  *   stores the returned offset.
-- 
2.31.1
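
Putting the rename together from an application's point of view, a
minimal sketch of a port configured purely with the new RTE_ETH_*
names (queue counts and offload choice are illustrative):

#include <rte_ethdev.h>

/* Sketch: the old DEV_RX_OFFLOAD_* and DEV_TX_OFFLOAD_* spellings
 * still compile through the compatibility macros, but resolve to the
 * same RTE_ETH_* bits used here. */
static int
port_configure(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
		},
		.txmode = {
			.mq_mode = RTE_ETH_MQ_TX_NONE,
			.offloads = RTE_ETH_TX_OFFLOAD_TCP_TSO,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}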


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v6] ethdev: add namespace
  2021-10-22  2:02         ` [dpdk-dev] [PATCH v6] " Ferruh Yigit
@ 2021-10-22  6:44           ` Andrew Rybchenko
  2021-10-22  8:25             ` Ferruh Yigit
  2021-10-22  9:48           ` Pattan, Reshma
  2021-10-22 11:03           ` [dpdk-dev] [PATCH v7] " Ferruh Yigit
  2 siblings, 1 reply; 32+ messages in thread
From: Andrew Rybchenko @ 2021-10-22  6:44 UTC (permalink / raw)
  To: Ferruh Yigit, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Haiyue Wang,
	Beilei Xing, Matan Azrad, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Chandubabu Namburu, Rasesh Mody, Shahed Shaikh, Bruce Richardson,
	Konstantin Ananyev, Ruifeng Wang, Rahul Lakkireddy,
	Marcin Wojtas, Michal Krawczyk, Shai Brandes, Evgeny Schemeilin,
	Igor Chauskin, Gagandeep Singh, Gaetan Rivet, Ziyang Xuan,
	Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou, Jingjing Wu,
	Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang
  Cc: dev, Tyler Retzlaff, David Marchand

On 10/22/21 5:02 AM, Ferruh Yigit wrote:
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> way. The macros for backward compatibility can be removed in next LTS.
> Also updated some struct names to have 'rte_eth' prefix.
> 
> All internal components switched to using new names.
> 
> Syntax fixed on lines that this patch touches.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> Acked-by: Jerin Jacob <jerinj@marvell.com>
> Acked-by: Wisam Jaddo <wisamm@nvidia.com>
> Acked-by: Rosen Xu <rosen.xu@intel.com>
> Acked-by: Chenbo Xia <chenbo.xia@intel.com>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
> Cc: David Marchand <david.marchand@redhat.com>
> Cc: Thomas Monjalon <thomas@monjalon.net>
> 
> v2:
> * Updated internal components
> * Removed deprecation notice
> 
> v3:
> * Updated missing macros / structs that David highlighted
> * Added release notes update
> 
> v4:
> * rebased on latest next-net
> * depends on https://patches.dpdk.org/user/todo/dpdk/?series=19744
> * Not able to complete scripts to update user code, although some
>   shared by Aman:
>   https://patches.dpdk.org/project/dpdk/patch/20211008102949.70716-1-aman.deep.singh@intel.com/
>   Sending new version for possible option to get this patch for -rc1 and
>   work for scripts later, before release.
> 
> v5:
> * rebased on latest next-net
> 
> v6:
> * rebased on latest next-net

[snip]

>  
> +/** Maximum nb. of vlan per mirror rule */
> +#define RTE_ETH_MIRROR_MAX_VLANS       64
> +#define ETH_MIRROR_MAX_VLANS           RTE_ETH_MIRROR_MAX_VLANS
> +
> +#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP    0x01  /**< Virtual Pool uplink Mirroring. */
> +#define ETH_MIRROR_VIRTUAL_POOL_UP        RTE_ETH_MIRROR_VIRTUAL_POOL_UP
> +#define RTE_ETH_MIRROR_UPLINK_PORT        0x02  /**< Uplink Port Mirroring. */
> +#define ETH_MIRROR_UPLINK_PORT            RTE_ETH_MIRROR_UPLINK_PORT
> +#define RTE_ETH_MIRROR_DOWNLINK_PORT      0x04  /**< Downlink Port Mirroring. */
> +#define ETH_MIRROR_DOWNLINK_PORT          RTE_ETH_MIRROR_DOWNLINK_PORT
> +#define RTE_ETH_MIRROR_VLAN               0x08  /**< VLAN Mirroring. */
> +#define ETH_MIRROR_VLAN                   RTE_ETH_MIRROR_VLAN
> +#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN  0x10  /**< Virtual Pool downlink Mirroring. */
> +#define ETH_MIRROR_VIRTUAL_POOL_DOWN      RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
> +
> +/**
> + * A structure used to configure VLAN traffic mirror of an Ethernet port.
> + */
> +struct rte_eth_vlan_mirror {
> +	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
> +	/** VLAN ID list for vlan mirroring. */
> +	uint16_t vlan_id[RTE_ETH_MIRROR_MAX_VLANS];
> +};
> +
> +/**
> + * A structure used to configure traffic mirror of an Ethernet port.
> + */
> +struct rte_eth_mirror_conf {
> +	uint8_t rule_type;  /**< Mirroring rule type */
> +	uint8_t dst_pool;   /**< Destination pool for this mirror rule. */
> +	uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
> +	/** VLAN ID setting for VLAN mirroring. */
> +	struct rte_eth_vlan_mirror vlan;
> +};
> +

I guess the above is an incorrect merge which brings the mirror
structures and defines back.

Andrew.


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v6] ethdev: add namespace
  2021-10-22  6:44           ` Andrew Rybchenko
@ 2021-10-22  8:25             ` Ferruh Yigit
  0 siblings, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-10-22  8:25 UTC (permalink / raw)
  To: Andrew Rybchenko, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Haiyue Wang,
	Beilei Xing, Matan Azrad, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Chandubabu Namburu, Rasesh Mody, Shahed Shaikh, Bruce Richardson,
	Konstantin Ananyev, Ruifeng Wang, Rahul Lakkireddy,
	Marcin Wojtas, Michal Krawczyk, Shai Brandes, Evgeny Schemeilin,
	Igor Chauskin, Gagandeep Singh, Gaetan Rivet, Ziyang Xuan,
	Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou, Jingjing Wu,
	Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang
  Cc: dev, Tyler Retzlaff, David Marchand

On 10/22/2021 7:44 AM, Andrew Rybchenko wrote:
> On 10/22/21 5:02 AM, Ferruh Yigit wrote:
>> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
>> way. The macros for backward compatibility can be removed in next LTS.
>> Also updated some struct names to have 'rte_eth' prefix.
>>
>> All internal components switched to using new names.
>>
>> Syntax fixed on lines that this patch touches.
>>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
>> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
>> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
>> Acked-by: Jerin Jacob <jerinj@marvell.com>
>> Acked-by: Wisam Jaddo <wisamm@nvidia.com>
>> Acked-by: Rosen Xu <rosen.xu@intel.com>
>> Acked-by: Chenbo Xia <chenbo.xia@intel.com>
>> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>> ---
>> Cc: David Marchand <david.marchand@redhat.com>
>> Cc: Thomas Monjalon <thomas@monjalon.net>
>>
>> v2:
>> * Updated internal components
>> * Removed deprecation notice
>>
>> v3:
>> * Updated missing macros / structs that David highlighted
>> * Added release notes update
>>
>> v4:
>> * rebased on latest next-net
>> * depends on https://patches.dpdk.org/user/todo/dpdk/?series=19744
>> * Not able to complete scripts to update user code, although some
>>    shared by Aman:
>>    https://patches.dpdk.org/project/dpdk/patch/20211008102949.70716-1-aman.deep.singh@intel.com/
>>    Sending new version for possible option to get this patch for -rc1 and
>>    work for scripts later, before release.
>>
>> v5:
>> * rebased on latest next-net
>>
>> v6:
>> * rebased on latest next-net
> 
> [snip]
> 
>>   
>> +/** Maximum nb. of vlan per mirror rule */
>> +#define RTE_ETH_MIRROR_MAX_VLANS       64
>> +#define ETH_MIRROR_MAX_VLANS           RTE_ETH_MIRROR_MAX_VLANS
>> +
>> +#define RTE_ETH_MIRROR_VIRTUAL_POOL_UP    0x01  /**< Virtual Pool uplink Mirroring. */
>> +#define ETH_MIRROR_VIRTUAL_POOL_UP        RTE_ETH_MIRROR_VIRTUAL_POOL_UP
>> +#define RTE_ETH_MIRROR_UPLINK_PORT        0x02  /**< Uplink Port Mirroring. */
>> +#define ETH_MIRROR_UPLINK_PORT            RTE_ETH_MIRROR_UPLINK_PORT
>> +#define RTE_ETH_MIRROR_DOWNLINK_PORT      0x04  /**< Downlink Port Mirroring. */
>> +#define ETH_MIRROR_DOWNLINK_PORT          RTE_ETH_MIRROR_DOWNLINK_PORT
>> +#define RTE_ETH_MIRROR_VLAN               0x08  /**< VLAN Mirroring. */
>> +#define ETH_MIRROR_VLAN                   RTE_ETH_MIRROR_VLAN
>> +#define RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN  0x10  /**< Virtual Pool downlink Mirroring. */
>> +#define ETH_MIRROR_VIRTUAL_POOL_DOWN      RTE_ETH_MIRROR_VIRTUAL_POOL_DOWN
>> +
>> +/**
>> + * A structure used to configure VLAN traffic mirror of an Ethernet port.
>> + */
>> +struct rte_eth_vlan_mirror {
>> +	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
>> +	/** VLAN ID list for vlan mirroring. */
>> +	uint16_t vlan_id[RTE_ETH_MIRROR_MAX_VLANS];
>> +};
>> +
>> +/**
>> + * A structure used to configure traffic mirror of an Ethernet port.
>> + */
>> +struct rte_eth_mirror_conf {
>> +	uint8_t rule_type;  /**< Mirroring rule type */
>> +	uint8_t dst_pool;   /**< Destination pool for this mirror rule. */
>> +	uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
>> +	/** VLAN ID setting for VLAN mirroring. */
>> +	struct rte_eth_vlan_mirror vlan;
>> +};
>> +
> 
> I guess the above is an incorrect merge which brings the mirror
> structures and defines back.
> 

Yes it is, I will fix, thanks.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v6] ethdev: add namespace
  2021-10-22  2:02         ` [dpdk-dev] [PATCH v6] " Ferruh Yigit
  2021-10-22  6:44           ` Andrew Rybchenko
@ 2021-10-22  9:48           ` Pattan, Reshma
  2021-10-22 11:03           ` [dpdk-dev] [PATCH v7] " Ferruh Yigit
  2 siblings, 0 replies; 32+ messages in thread
From: Pattan, Reshma @ 2021-10-22  9:48 UTC (permalink / raw)
  To: Yigit, Ferruh, Tahhan, Maryam, Jerin Jacob, Wisam Jaddo,
	Dumitrescu, Cristian, Li, Xiaoyun, Thomas Monjalon,
	Andrew Rybchenko, Jayatheerthan, Jay, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	Daley, John, Hyong Youb Kim, Zhang, Qi Z, Wang, Xiao W, Wang,
	Haiyue, Xing, Beilei, Matan Azrad, Viacheslav Ovsiienko, Wiles,
	Keith, Hu, Jiayu, Olivier Matz, Ori Kam, Akhil Goyal, Doherty,
	Declan, Ray Kinsella, Nicolau, Radu, Hemant Agrawal,
	Sachin Saxena, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, John W. Linville, Loftus, Ciara,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Steven Webster, Peters, Matt, Chandubabu Namburu, Rasesh Mody,
	Shahed Shaikh, Richardson, Bruce, Ananyev, Konstantin,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Wu, Jingjing, Yang, Qiming, Andrew Boyer,
	Xu, Rosen, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Singh, Jasvinder, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Xia, Chenbo, Yong Wang, Chautru, Nicolas, Hunt,
	David, Van Haaren, Harry, Iremonger, Bernard, Burakov, Anatoly,
	Mcnamara, John, Rybalchenko, Kirill, Marohn, Byron, Wang,
	Yipeng1
  Cc: dev, Tyler Retzlaff, David Marchand



> -----Original Message-----
> From: Yigit, Ferruh <ferruh.yigit@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>


>  app/proc-info/main.c                          |    8 +-
>  examples/l3fwd-power/main.c                   |   16 +-

Acked-by: Reshma Pattan <reshma.pattan@intel.com>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-10-22  2:02         ` [dpdk-dev] [PATCH v6] " Ferruh Yigit
  2021-10-22  6:44           ` Andrew Rybchenko
  2021-10-22  9:48           ` Pattan, Reshma
@ 2021-10-22 11:03           ` Ferruh Yigit
  2021-10-22 11:28             ` Andrew Rybchenko
  2021-11-01  9:23             ` Jiawen Wu
  2 siblings, 2 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-10-22 11:03 UTC (permalink / raw)
  To: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Andrew Rybchenko, Jay Jayatheerthan, Chas Williams,
	Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Chandubabu Namburu, Rasesh Mody, Shahed Shaikh, Bruce Richardson,
	Konstantin Ananyev, Ruifeng Wang, Rahul Lakkireddy,
	Marcin Wojtas, Michal Krawczyk, Shai Brandes, Evgeny Schemeilin,
	Igor Chauskin, Gagandeep Singh, Gaetan Rivet, Ziyang Xuan,
	Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou, Jingjing Wu,
	Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang
  Cc: Ferruh Yigit, dev, Tyler Retzlaff, David Marchand


Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
way. The macros for backward compatibility can be removed in next LTS.
Also updated some struct names to have 'rte_eth' prefix.

All internal components switched to using new names.

Syntax fixed on lines that this patch touches.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Wisam Jaddo <wisamm@nvidia.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
Cc: David Marchand <david.marchand@redhat.com>
Cc: Thomas Monjalon <thomas@monjalon.net>

v2:
* Updated internal components
* Removed deprecation notice

v3:
* Updated missing macros / structs that David highlighted
* Added release notes update

v4:
* rebased on latest next-net
* depends on https://patches.dpdk.org/user/todo/dpdk/?series=19744
* Not able to complete scripts to update user code, although some
  shared by Aman:
  https://patches.dpdk.org/project/dpdk/patch/20211008102949.70716-1-aman.deep.singh@intel.com/
  Sending new version for possible option to get this patch for -rc1 and
  work for scripts later, before release.

v5:
* rebased on latest next-net

v6:
* rebased on latest next-net

v7:
* Remove mirror structures which are rebase residue
* rebased on latest next-net
---
 app/proc-info/main.c                          |   8 +-
 app/test-eventdev/test_perf_common.c          |   4 +-
 app/test-eventdev/test_pipeline_common.c      |  10 +-
 app/test-flow-perf/config.h                   |   2 +-
 app/test-pipeline/init.c                      |   8 +-
 app/test-pmd/cmdline.c                        | 286 ++---
 app/test-pmd/config.c                         | 200 ++--
 app/test-pmd/csumonly.c                       |  28 +-
 app/test-pmd/flowgen.c                        |   6 +-
 app/test-pmd/macfwd.c                         |   6 +-
 app/test-pmd/macswap_common.h                 |   6 +-
 app/test-pmd/parameters.c                     |  54 +-
 app/test-pmd/testpmd.c                        |  52 +-
 app/test-pmd/testpmd.h                        |   2 +-
 app/test-pmd/txonly.c                         |   6 +-
 app/test/test_ethdev_link.c                   |  68 +-
 app/test/test_event_eth_rx_adapter.c          |   4 +-
 app/test/test_kni.c                           |   2 +-
 app/test/test_link_bonding.c                  |   4 +-
 app/test/test_link_bonding_mode4.c            |   4 +-
 app/test/test_link_bonding_rssconf.c          |  28 +-
 app/test/test_pmd_perf.c                      |  12 +-
 app/test/virtual_pmd.c                        |  10 +-
 doc/guides/eventdevs/cnxk.rst                 |   2 +-
 doc/guides/eventdevs/octeontx2.rst            |   2 +-
 doc/guides/nics/af_packet.rst                 |   2 +-
 doc/guides/nics/bnxt.rst                      |  24 +-
 doc/guides/nics/enic.rst                      |   2 +-
 doc/guides/nics/features.rst                  | 114 +-
 doc/guides/nics/fm10k.rst                     |   6 +-
 doc/guides/nics/intel_vf.rst                  |  10 +-
 doc/guides/nics/ixgbe.rst                     |  12 +-
 doc/guides/nics/mlx5.rst                      |   4 +-
 doc/guides/nics/tap.rst                       |   2 +-
 .../generic_segmentation_offload_lib.rst      |   8 +-
 doc/guides/prog_guide/mbuf_lib.rst            |  18 +-
 doc/guides/prog_guide/poll_mode_drv.rst       |   8 +-
 doc/guides/prog_guide/rte_flow.rst            |  34 +-
 doc/guides/prog_guide/rte_security.rst        |   2 +-
 doc/guides/rel_notes/deprecation.rst          |  10 +-
 doc/guides/rel_notes/release_21_11.rst        |   3 +
 doc/guides/sample_app_ug/ipsec_secgw.rst      |   4 +-
 doc/guides/testpmd_app_ug/run_app.rst         |   2 +-
 drivers/bus/dpaa/include/process.h            |  16 +-
 drivers/common/cnxk/roc_npc.h                 |   2 +-
 drivers/net/af_packet/rte_eth_af_packet.c     |  20 +-
 drivers/net/af_xdp/rte_eth_af_xdp.c           |  12 +-
 drivers/net/ark/ark_ethdev.c                  |  16 +-
 drivers/net/atlantic/atl_ethdev.c             |  88 +-
 drivers/net/atlantic/atl_ethdev.h             |  18 +-
 drivers/net/atlantic/atl_rxtx.c               |   6 +-
 drivers/net/avp/avp_ethdev.c                  |  26 +-
 drivers/net/axgbe/axgbe_dev.c                 |   6 +-
 drivers/net/axgbe/axgbe_ethdev.c              | 104 +-
 drivers/net/axgbe/axgbe_ethdev.h              |  12 +-
 drivers/net/axgbe/axgbe_mdio.c                |   2 +-
 drivers/net/axgbe/axgbe_rxtx.c                |   6 +-
 drivers/net/bnx2x/bnx2x_ethdev.c              |  12 +-
 drivers/net/bnxt/bnxt.h                       |  62 +-
 drivers/net/bnxt/bnxt_ethdev.c                | 172 +--
 drivers/net/bnxt/bnxt_flow.c                  |   6 +-
 drivers/net/bnxt/bnxt_hwrm.c                  | 112 +-
 drivers/net/bnxt/bnxt_reps.c                  |   2 +-
 drivers/net/bnxt/bnxt_ring.c                  |   4 +-
 drivers/net/bnxt/bnxt_rxq.c                   |  28 +-
 drivers/net/bnxt/bnxt_rxr.c                   |   4 +-
 drivers/net/bnxt/bnxt_rxtx_vec_avx2.c         |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_common.h       |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c         |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c          |   2 +-
 drivers/net/bnxt/bnxt_txr.c                   |   4 +-
 drivers/net/bnxt/bnxt_vnic.c                  |  30 +-
 drivers/net/bnxt/rte_pmd_bnxt.c               |   8 +-
 drivers/net/bonding/eth_bond_private.h        |   4 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c     |  16 +-
 drivers/net/bonding/rte_eth_bond_api.c        |   6 +-
 drivers/net/bonding/rte_eth_bond_pmd.c        |  50 +-
 drivers/net/cnxk/cn10k_ethdev.c               |  42 +-
 drivers/net/cnxk/cn10k_rte_flow.c             |   2 +-
 drivers/net/cnxk/cn10k_rx.c                   |   4 +-
 drivers/net/cnxk/cn10k_tx.c                   |   4 +-
 drivers/net/cnxk/cn9k_ethdev.c                |  60 +-
 drivers/net/cnxk/cn9k_rx.c                    |   4 +-
 drivers/net/cnxk/cn9k_tx.c                    |   4 +-
 drivers/net/cnxk/cnxk_ethdev.c                | 112 +-
 drivers/net/cnxk/cnxk_ethdev.h                |  49 +-
 drivers/net/cnxk/cnxk_ethdev_devargs.c        |   6 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c            | 106 +-
 drivers/net/cnxk/cnxk_link.c                  |  14 +-
 drivers/net/cnxk/cnxk_ptp.c                   |   4 +-
 drivers/net/cnxk/cnxk_rte_flow.c              |   2 +-
 drivers/net/cxgbe/cxgbe.h                     |  46 +-
 drivers/net/cxgbe/cxgbe_ethdev.c              |  42 +-
 drivers/net/cxgbe/cxgbe_main.c                |  12 +-
 drivers/net/dpaa/dpaa_ethdev.c                | 180 ++--
 drivers/net/dpaa/dpaa_ethdev.h                |  10 +-
 drivers/net/dpaa/dpaa_flow.c                  |  32 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |  47 +-
 drivers/net/dpaa2/dpaa2_ethdev.c              | 138 +--
 drivers/net/dpaa2/dpaa2_ethdev.h              |  22 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   8 +-
 drivers/net/e1000/e1000_ethdev.h              |  18 +-
 drivers/net/e1000/em_ethdev.c                 |  64 +-
 drivers/net/e1000/em_rxtx.c                   |  38 +-
 drivers/net/e1000/igb_ethdev.c                | 158 +--
 drivers/net/e1000/igb_pf.c                    |   2 +-
 drivers/net/e1000/igb_rxtx.c                  | 116 +--
 drivers/net/ena/ena_ethdev.c                  |  70 +-
 drivers/net/ena/ena_ethdev.h                  |   4 +-
 drivers/net/ena/ena_rss.c                     |  74 +-
 drivers/net/enetc/enetc_ethdev.c              |  30 +-
 drivers/net/enic/enic.h                       |   2 +-
 drivers/net/enic/enic_ethdev.c                |  88 +-
 drivers/net/enic/enic_main.c                  |  40 +-
 drivers/net/enic/enic_res.c                   |  50 +-
 drivers/net/failsafe/failsafe.c               |   8 +-
 drivers/net/failsafe/failsafe_intr.c          |   4 +-
 drivers/net/failsafe/failsafe_ops.c           |  78 +-
 drivers/net/fm10k/fm10k.h                     |   4 +-
 drivers/net/fm10k/fm10k_ethdev.c              | 146 +--
 drivers/net/fm10k/fm10k_rxtx_vec.c            |   6 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c      |  22 +-
 drivers/net/hinic/hinic_pmd_ethdev.c          | 136 +--
 drivers/net/hinic/hinic_pmd_rx.c              |  36 +-
 drivers/net/hinic/hinic_pmd_rx.h              |  22 +-
 drivers/net/hns3/hns3_dcb.c                   |  14 +-
 drivers/net/hns3/hns3_ethdev.c                | 352 +++----
 drivers/net/hns3/hns3_ethdev.h                |  12 +-
 drivers/net/hns3/hns3_ethdev_vf.c             | 100 +-
 drivers/net/hns3/hns3_flow.c                  |   6 +-
 drivers/net/hns3/hns3_ptp.c                   |   2 +-
 drivers/net/hns3/hns3_rss.c                   | 108 +-
 drivers/net/hns3/hns3_rss.h                   |  28 +-
 drivers/net/hns3/hns3_rxtx.c                  |  30 +-
 drivers/net/hns3/hns3_rxtx.h                  |   2 +-
 drivers/net/hns3/hns3_rxtx_vec.c              |  10 +-
 drivers/net/i40e/i40e_ethdev.c                | 272 ++---
 drivers/net/i40e/i40e_ethdev.h                |  24 +-
 drivers/net/i40e/i40e_flow.c                  |  32 +-
 drivers/net/i40e/i40e_hash.c                  | 158 +--
 drivers/net/i40e/i40e_pf.c                    |  14 +-
 drivers/net/i40e/i40e_rxtx.c                  |   8 +-
 drivers/net/i40e/i40e_rxtx.h                  |   4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c       |   2 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h       |   8 +-
 drivers/net/i40e/i40e_vf_representor.c        |  48 +-
 drivers/net/iavf/iavf.h                       |  24 +-
 drivers/net/iavf/iavf_ethdev.c                | 178 ++--
 drivers/net/iavf/iavf_hash.c                  | 320 +++---
 drivers/net/iavf/iavf_rxtx.c                  |   2 +-
 drivers/net/iavf/iavf_rxtx.h                  |  24 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c         |   4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c       |   6 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   2 +-
 drivers/net/ice/ice_dcf.c                     |   2 +-
 drivers/net/ice/ice_dcf_ethdev.c              |  86 +-
 drivers/net/ice/ice_dcf_vf_representor.c      |  56 +-
 drivers/net/ice/ice_ethdev.c                  | 180 ++--
 drivers/net/ice/ice_ethdev.h                  |  26 +-
 drivers/net/ice/ice_hash.c                    | 290 +++---
 drivers/net/ice/ice_rxtx.c                    |  16 +-
 drivers/net/ice/ice_rxtx_vec_avx2.c           |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c         |   4 +-
 drivers/net/ice/ice_rxtx_vec_common.h         |  28 +-
 drivers/net/ice/ice_rxtx_vec_sse.c            |   2 +-
 drivers/net/igc/igc_ethdev.c                  | 138 +--
 drivers/net/igc/igc_ethdev.h                  |  54 +-
 drivers/net/igc/igc_txrx.c                    |  48 +-
 drivers/net/ionic/ionic_ethdev.c              | 138 +--
 drivers/net/ionic/ionic_ethdev.h              |  12 +-
 drivers/net/ionic/ionic_lif.c                 |  36 +-
 drivers/net/ionic/ionic_rxtx.c                |  10 +-
 drivers/net/ipn3ke/ipn3ke_representor.c       |  64 +-
 drivers/net/ixgbe/ixgbe_ethdev.c              | 285 +++--
 drivers/net/ixgbe/ixgbe_ethdev.h              |  18 +-
 drivers/net/ixgbe/ixgbe_fdir.c                |  24 +-
 drivers/net/ixgbe/ixgbe_flow.c                |   2 +-
 drivers/net/ixgbe/ixgbe_ipsec.c               |  12 +-
 drivers/net/ixgbe/ixgbe_pf.c                  |  34 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                | 249 +++--
 drivers/net/ixgbe/ixgbe_rxtx.h                |   4 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |   2 +-
 drivers/net/ixgbe/ixgbe_tm.c                  |  16 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c      |  16 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.c             |  14 +-
 drivers/net/ixgbe/rte_pmd_ixgbe.h             |   4 +-
 drivers/net/kni/rte_eth_kni.c                 |   8 +-
 drivers/net/liquidio/lio_ethdev.c             | 114 +-
 drivers/net/memif/memif_socket.c              |   2 +-
 drivers/net/memif/rte_eth_memif.c             |  16 +-
 drivers/net/mlx4/mlx4_ethdev.c                |  32 +-
 drivers/net/mlx4/mlx4_flow.c                  |  30 +-
 drivers/net/mlx4/mlx4_intr.c                  |   8 +-
 drivers/net/mlx4/mlx4_rxq.c                   |  18 +-
 drivers/net/mlx4/mlx4_txq.c                   |  24 +-
 drivers/net/mlx5/linux/mlx5_ethdev_os.c       |  54 +-
 drivers/net/mlx5/linux/mlx5_os.c              |   6 +-
 drivers/net/mlx5/mlx5.c                       |   4 +-
 drivers/net/mlx5/mlx5.h                       |   2 +-
 drivers/net/mlx5/mlx5_defs.h                  |   6 +-
 drivers/net/mlx5/mlx5_ethdev.c                |   6 +-
 drivers/net/mlx5/mlx5_flow.c                  |  54 +-
 drivers/net/mlx5/mlx5_flow.h                  |  12 +-
 drivers/net/mlx5/mlx5_flow_dv.c               |  44 +-
 drivers/net/mlx5/mlx5_flow_verbs.c            |   4 +-
 drivers/net/mlx5/mlx5_rss.c                   |  10 +-
 drivers/net/mlx5/mlx5_rxq.c                   |  40 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h              |   8 +-
 drivers/net/mlx5/mlx5_tx.c                    |  30 +-
 drivers/net/mlx5/mlx5_txq.c                   |  58 +-
 drivers/net/mlx5/mlx5_vlan.c                  |   4 +-
 drivers/net/mlx5/windows/mlx5_os.c            |   4 +-
 drivers/net/mvneta/mvneta_ethdev.c            |  32 +-
 drivers/net/mvneta/mvneta_ethdev.h            |  10 +-
 drivers/net/mvneta/mvneta_rxtx.c              |   2 +-
 drivers/net/mvpp2/mrvl_ethdev.c               | 112 +-
 drivers/net/netvsc/hn_ethdev.c                |  70 +-
 drivers/net/netvsc/hn_rndis.c                 |  50 +-
 drivers/net/nfb/nfb_ethdev.c                  |  20 +-
 drivers/net/nfb/nfb_rx.c                      |   2 +-
 drivers/net/nfp/nfp_common.c                  | 122 +--
 drivers/net/nfp/nfp_ethdev.c                  |   2 +-
 drivers/net/nfp/nfp_ethdev_vf.c               |   2 +-
 drivers/net/ngbe/ngbe_ethdev.c                |  50 +-
 drivers/net/null/rte_eth_null.c               |  28 +-
 drivers/net/octeontx/octeontx_ethdev.c        |  74 +-
 drivers/net/octeontx/octeontx_ethdev.h        |  30 +-
 drivers/net/octeontx/octeontx_ethdev_ops.c    |  26 +-
 drivers/net/octeontx2/otx2_ethdev.c           |  96 +-
 drivers/net/octeontx2/otx2_ethdev.h           |  64 +-
 drivers/net/octeontx2/otx2_ethdev_devargs.c   |  12 +-
 drivers/net/octeontx2/otx2_ethdev_ops.c       |  14 +-
 drivers/net/octeontx2/otx2_ethdev_sec.c       |   8 +-
 drivers/net/octeontx2/otx2_flow.c             |   2 +-
 drivers/net/octeontx2/otx2_flow_ctrl.c        |  36 +-
 drivers/net/octeontx2/otx2_flow_parse.c       |   4 +-
 drivers/net/octeontx2/otx2_link.c             |  40 +-
 drivers/net/octeontx2/otx2_mcast.c            |   2 +-
 drivers/net/octeontx2/otx2_ptp.c              |   4 +-
 drivers/net/octeontx2/otx2_rss.c              |  70 +-
 drivers/net/octeontx2/otx2_rx.c               |   4 +-
 drivers/net/octeontx2/otx2_tx.c               |   2 +-
 drivers/net/octeontx2/otx2_vlan.c             |  42 +-
 drivers/net/octeontx_ep/otx_ep_ethdev.c       |   6 +-
 drivers/net/octeontx_ep/otx_ep_rxtx.c         |   6 +-
 drivers/net/pcap/pcap_ethdev.c                |  12 +-
 drivers/net/pfe/pfe_ethdev.c                  |  18 +-
 drivers/net/qede/base/mcp_public.h            |   4 +-
 drivers/net/qede/qede_ethdev.c                | 156 +--
 drivers/net/qede/qede_filter.c                |  42 +-
 drivers/net/qede/qede_rxtx.c                  |   2 +-
 drivers/net/qede/qede_rxtx.h                  |  16 +-
 drivers/net/ring/rte_eth_ring.c               |  20 +-
 drivers/net/sfc/sfc.c                         |  30 +-
 drivers/net/sfc/sfc_ef100_rx.c                |  10 +-
 drivers/net/sfc/sfc_ef100_tx.c                |  20 +-
 drivers/net/sfc/sfc_ef10_essb_rx.c            |   4 +-
 drivers/net/sfc/sfc_ef10_rx.c                 |   8 +-
 drivers/net/sfc/sfc_ef10_tx.c                 |  32 +-
 drivers/net/sfc/sfc_ethdev.c                  |  50 +-
 drivers/net/sfc/sfc_flow.c                    |   2 +-
 drivers/net/sfc/sfc_port.c                    |  52 +-
 drivers/net/sfc/sfc_repr.c                    |  10 +-
 drivers/net/sfc/sfc_rx.c                      |  50 +-
 drivers/net/sfc/sfc_tx.c                      |  50 +-
 drivers/net/softnic/rte_eth_softnic.c         |  12 +-
 drivers/net/szedata2/rte_eth_szedata2.c       |  14 +-
 drivers/net/tap/rte_eth_tap.c                 | 104 +-
 drivers/net/tap/tap_rss.h                     |   2 +-
 drivers/net/thunderx/nicvf_ethdev.c           | 102 +-
 drivers/net/thunderx/nicvf_ethdev.h           |  40 +-
 drivers/net/txgbe/txgbe_ethdev.c              | 242 ++---
 drivers/net/txgbe/txgbe_ethdev.h              |  18 +-
 drivers/net/txgbe/txgbe_ethdev_vf.c           |  24 +-
 drivers/net/txgbe/txgbe_fdir.c                |  20 +-
 drivers/net/txgbe/txgbe_flow.c                |   2 +-
 drivers/net/txgbe/txgbe_ipsec.c               |  12 +-
 drivers/net/txgbe/txgbe_pf.c                  |  34 +-
 drivers/net/txgbe/txgbe_rxtx.c                | 308 +++---
 drivers/net/txgbe/txgbe_rxtx.h                |   4 +-
 drivers/net/txgbe/txgbe_tm.c                  |  16 +-
 drivers/net/vhost/rte_eth_vhost.c             |  16 +-
 drivers/net/virtio/virtio_ethdev.c            | 124 +--
 drivers/net/vmxnet3/vmxnet3_ethdev.c          |  72 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h          |  16 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c            |  16 +-
 examples/bbdev_app/main.c                     |   6 +-
 examples/bond/main.c                          |  14 +-
 examples/distributor/main.c                   |  12 +-
 examples/ethtool/ethtool-app/main.c           |   2 +-
 examples/ethtool/lib/rte_ethtool.c            |  18 +-
 .../pipeline_worker_generic.c                 |  16 +-
 .../eventdev_pipeline/pipeline_worker_tx.c    |  12 +-
 examples/flow_classify/flow_classify.c        |   4 +-
 examples/flow_filtering/main.c                |  16 +-
 examples/ioat/ioatfwd.c                       |   8 +-
 examples/ip_fragmentation/main.c              |  12 +-
 examples/ip_pipeline/link.c                   |  20 +-
 examples/ip_reassembly/main.c                 |  18 +-
 examples/ipsec-secgw/ipsec-secgw.c            |  32 +-
 examples/ipsec-secgw/sa.c                     |   8 +-
 examples/ipv4_multicast/main.c                |   6 +-
 examples/kni/main.c                           |   8 +-
 examples/l2fwd-crypto/main.c                  |  10 +-
 examples/l2fwd-event/l2fwd_common.c           |  10 +-
 examples/l2fwd-event/main.c                   |   2 +-
 examples/l2fwd-jobstats/main.c                |   8 +-
 examples/l2fwd-keepalive/main.c               |   8 +-
 examples/l2fwd/main.c                         |   8 +-
 examples/l3fwd-acl/main.c                     |  18 +-
 examples/l3fwd-graph/main.c                   |  14 +-
 examples/l3fwd-power/main.c                   |  16 +-
 examples/l3fwd/l3fwd_event.c                  |   4 +-
 examples/l3fwd/main.c                         |  18 +-
 examples/link_status_interrupt/main.c         |  10 +-
 .../client_server_mp/mp_server/init.c         |   4 +-
 examples/multi_process/symmetric_mp/main.c    |  14 +-
 examples/ntb/ntb_fwd.c                        |   6 +-
 examples/packet_ordering/main.c               |   4 +-
 .../performance-thread/l3fwd-thread/main.c    |  16 +-
 examples/pipeline/obj.c                       |  20 +-
 examples/ptpclient/ptpclient.c                |  10 +-
 examples/qos_meter/main.c                     |  16 +-
 examples/qos_sched/init.c                     |   6 +-
 examples/rxtx_callbacks/main.c                |   8 +-
 examples/server_node_efd/server/init.c        |   8 +-
 examples/skeleton/basicfwd.c                  |   4 +-
 examples/vhost/main.c                         |  26 +-
 examples/vm_power_manager/main.c              |   6 +-
 examples/vmdq/main.c                          |  20 +-
 examples/vmdq_dcb/main.c                      |  40 +-
 lib/ethdev/ethdev_driver.h                    |  36 +-
 lib/ethdev/rte_ethdev.c                       | 181 ++--
 lib/ethdev/rte_ethdev.h                       | 986 +++++++++++-------
 lib/ethdev/rte_flow.h                         |   2 +-
 lib/gso/rte_gso.c                             |  20 +-
 lib/gso/rte_gso.h                             |   4 +-
 lib/mbuf/rte_mbuf_core.h                      |   8 +-
 lib/mbuf/rte_mbuf_dyn.h                       |   2 +-
 339 files changed, 6601 insertions(+), 6385 deletions(-)

diff --git a/app/proc-info/main.c b/app/proc-info/main.c
index bfe5ce825b70..a4271047e693 100644
--- a/app/proc-info/main.c
+++ b/app/proc-info/main.c
@@ -757,11 +757,11 @@ show_port(void)
 		}
 
 		ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
-		if (ret == 0 && fc_conf.mode != RTE_FC_NONE)  {
+		if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE)  {
 			printf("\t  -- flow control mode %s%s high %u low %u pause %u%s%s\n",
-			       fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
-			       fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
-			       fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+			       fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+			       fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+			       fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
 			       fc_conf.autoneg ? " auto" : "",
 			       fc_conf.high_water,
 			       fc_conf.low_water,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 660d5a0364b6..31d1b0e14653 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -668,13 +668,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct test_perf *t = evt_test_priv(test);
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 			.split_hdr_size = 0,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 2775e72c580d..d202091077a6 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
@@ -223,7 +223,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 			local_port_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_RSS_HASH;
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 		ret = rte_eth_dev_info_get(i, &dev_info);
 		if (ret != 0) {
@@ -233,9 +233,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 
 		/* Enable mbuf fast free if PMD has the capability. */
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h
index a14d4e05e185..4249b6175b82 100644
--- a/app/test-flow-perf/config.h
+++ b/app/test-flow-perf/config.h
@@ -5,7 +5,7 @@
 #define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
 #define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
 
 /* Configuration */
 #define RXQ_NUM 4
diff --git a/app/test-pipeline/init.c b/app/test-pipeline/init.c
index fe37d63730c6..c73801904103 100644
--- a/app/test-pipeline/init.c
+++ b/app/test-pipeline/init.c
@@ -70,16 +70,16 @@ struct app_params app = {
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -178,7 +178,7 @@ app_ports_check_link(void)
 		RTE_LOG(INFO, USER1, "Port %u %s\n",
 			port,
 			link_status_text);
-		if (link.link_status == ETH_LINK_DOWN)
+		if (link.link_status == RTE_ETH_LINK_DOWN)
 			all_ports_up = 0;
 	}
 
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 3221f6e1aa40..ebea13f86ab0 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1478,51 +1478,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
 	int duplex;
 
 	if (!strcmp(duplexstr, "half")) {
-		duplex = ETH_LINK_HALF_DUPLEX;
+		duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	} else if (!strcmp(duplexstr, "full")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else if (!strcmp(duplexstr, "auto")) {
-		duplex = ETH_LINK_FULL_DUPLEX;
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		fprintf(stderr, "Unknown duplex parameter\n");
 		return -1;
 	}
 
 	if (!strcmp(speedstr, "10")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
 	} else if (!strcmp(speedstr, "100")) {
-		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
-				ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+		*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+				RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
 	} else {
-		if (duplex != ETH_LINK_FULL_DUPLEX) {
+		if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
 			fprintf(stderr, "Invalid speed/duplex parameters\n");
 			return -1;
 		}
 		if (!strcmp(speedstr, "1000")) {
-			*speed = ETH_LINK_SPEED_1G;
+			*speed = RTE_ETH_LINK_SPEED_1G;
 		} else if (!strcmp(speedstr, "10000")) {
-			*speed = ETH_LINK_SPEED_10G;
+			*speed = RTE_ETH_LINK_SPEED_10G;
 		} else if (!strcmp(speedstr, "25000")) {
-			*speed = ETH_LINK_SPEED_25G;
+			*speed = RTE_ETH_LINK_SPEED_25G;
 		} else if (!strcmp(speedstr, "40000")) {
-			*speed = ETH_LINK_SPEED_40G;
+			*speed = RTE_ETH_LINK_SPEED_40G;
 		} else if (!strcmp(speedstr, "50000")) {
-			*speed = ETH_LINK_SPEED_50G;
+			*speed = RTE_ETH_LINK_SPEED_50G;
 		} else if (!strcmp(speedstr, "100000")) {
-			*speed = ETH_LINK_SPEED_100G;
+			*speed = RTE_ETH_LINK_SPEED_100G;
 		} else if (!strcmp(speedstr, "200000")) {
-			*speed = ETH_LINK_SPEED_200G;
+			*speed = RTE_ETH_LINK_SPEED_200G;
 		} else if (!strcmp(speedstr, "auto")) {
-			*speed = ETH_LINK_SPEED_AUTONEG;
+			*speed = RTE_ETH_LINK_SPEED_AUTONEG;
 		} else {
 			fprintf(stderr, "Unknown speed parameter\n");
 			return -1;
 		}
 	}
 
-	if (*speed != ETH_LINK_SPEED_AUTONEG)
-		*speed |= ETH_LINK_SPEED_FIXED;
+	if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+		*speed |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return 0;
 }
@@ -2166,33 +2166,33 @@ cmd_config_rss_parsed(void *parsed_result,
 	int ret;
 
 	if (!strcmp(res->value, "all"))
-		rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
-			ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
-			ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
-			ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
-			ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+			RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+			RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+			RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+			RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "eth"))
-		rss_conf.rss_hf = ETH_RSS_ETH;
+		rss_conf.rss_hf = RTE_ETH_RSS_ETH;
 	else if (!strcmp(res->value, "vlan"))
-		rss_conf.rss_hf = ETH_RSS_VLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
 	else if (!strcmp(res->value, "ip"))
-		rss_conf.rss_hf = ETH_RSS_IP;
+		rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	else if (!strcmp(res->value, "udp"))
-		rss_conf.rss_hf = ETH_RSS_UDP;
+		rss_conf.rss_hf = RTE_ETH_RSS_UDP;
 	else if (!strcmp(res->value, "tcp"))
-		rss_conf.rss_hf = ETH_RSS_TCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_TCP;
 	else if (!strcmp(res->value, "sctp"))
-		rss_conf.rss_hf = ETH_RSS_SCTP;
+		rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
 	else if (!strcmp(res->value, "ether"))
-		rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
 	else if (!strcmp(res->value, "port"))
-		rss_conf.rss_hf = ETH_RSS_PORT;
+		rss_conf.rss_hf = RTE_ETH_RSS_PORT;
 	else if (!strcmp(res->value, "vxlan"))
-		rss_conf.rss_hf = ETH_RSS_VXLAN;
+		rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
 	else if (!strcmp(res->value, "geneve"))
-		rss_conf.rss_hf = ETH_RSS_GENEVE;
+		rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
 	else if (!strcmp(res->value, "nvgre"))
-		rss_conf.rss_hf = ETH_RSS_NVGRE;
+		rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
 	else if (!strcmp(res->value, "l3-pre32"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
 	else if (!strcmp(res->value, "l3-pre40"))
@@ -2206,46 +2206,46 @@ cmd_config_rss_parsed(void *parsed_result,
 	else if (!strcmp(res->value, "l3-pre96"))
 		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
 	else if (!strcmp(res->value, "l3-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
 	else if (!strcmp(res->value, "l3-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
 	else if (!strcmp(res->value, "l4-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
 	else if (!strcmp(res->value, "l4-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
 	else if (!strcmp(res->value, "l2-src-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
 	else if (!strcmp(res->value, "l2-dst-only"))
-		rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
 	else if (!strcmp(res->value, "l2tpv3"))
-		rss_conf.rss_hf = ETH_RSS_L2TPV3;
+		rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
 	else if (!strcmp(res->value, "esp"))
-		rss_conf.rss_hf = ETH_RSS_ESP;
+		rss_conf.rss_hf = RTE_ETH_RSS_ESP;
 	else if (!strcmp(res->value, "ah"))
-		rss_conf.rss_hf = ETH_RSS_AH;
+		rss_conf.rss_hf = RTE_ETH_RSS_AH;
 	else if (!strcmp(res->value, "pfcp"))
-		rss_conf.rss_hf = ETH_RSS_PFCP;
+		rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
 	else if (!strcmp(res->value, "pppoe"))
-		rss_conf.rss_hf = ETH_RSS_PPPOE;
+		rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
 	else if (!strcmp(res->value, "gtpu"))
-		rss_conf.rss_hf = ETH_RSS_GTPU;
+		rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
 	else if (!strcmp(res->value, "ecpri"))
-		rss_conf.rss_hf = ETH_RSS_ECPRI;
+		rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
 	else if (!strcmp(res->value, "mpls"))
-		rss_conf.rss_hf = ETH_RSS_MPLS;
+		rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
 	else if (!strcmp(res->value, "ipv4-chksum"))
-		rss_conf.rss_hf = ETH_RSS_IPV4_CHKSUM;
+		rss_conf.rss_hf = RTE_ETH_RSS_IPV4_CHKSUM;
 	else if (!strcmp(res->value, "none"))
 		rss_conf.rss_hf = 0;
 	else if (!strcmp(res->value, "level-default")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
 	} else if (!strcmp(res->value, "level-outer")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
 	} else if (!strcmp(res->value, "level-inner")) {
-		rss_hf &= (~ETH_RSS_LEVEL_MASK);
-		rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+		rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+		rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
 	} else if (!strcmp(res->value, "default"))
 		use_default = 1;
 	else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@@ -2982,8 +2982,8 @@ parse_reta_config(const char *str,
 			return -1;
 		}
 
-		idx = hash_index / RTE_RETA_GROUP_SIZE;
-		shift = hash_index % RTE_RETA_GROUP_SIZE;
+		idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
+		shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[idx].mask |= (1ULL << shift);
 		reta_conf[idx].reta[shift] = nb_queue;
 	}
@@ -3012,10 +3012,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
 	} else
 		printf("The reta size of port %d is %u\n",
 			res->port_id, dev_info.reta_size);
-	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+	if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		fprintf(stderr,
 			"Currently do not support more than %u entries of redirection table\n",
-			ETH_RSS_RETA_SIZE_512);
+			RTE_ETH_RSS_RETA_SIZE_512);
 		return;
 	}
 
@@ -3086,8 +3086,8 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
 	char *end;
 	char *str_fld[8];
 	uint16_t i;
-	uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 	int ret;
 
 	p = strchr(p0, '(');
@@ -3132,7 +3132,7 @@ cmd_showport_reta_parsed(void *parsed_result,
 	if (ret != 0)
 		return;
 
-	max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+	max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
 	if (res->size == 0 || res->size > max_reta_size) {
 		fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
 			res->size, max_reta_size);
@@ -3272,7 +3272,7 @@ cmd_config_dcb_parsed(void *parsed_result,
 		return;
 	}
 
-	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+	if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
 		fprintf(stderr,
 			"The invalid number of traffic class, only 4 or 8 allowed.\n");
 		return;
@@ -4276,9 +4276,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
 	enum rte_vlan_type vlan_type;
 
 	if (!strcmp(res->vlan_type, "inner"))
-		vlan_type = ETH_VLAN_TYPE_INNER;
+		vlan_type = RTE_ETH_VLAN_TYPE_INNER;
 	else if (!strcmp(res->vlan_type, "outer"))
-		vlan_type = ETH_VLAN_TYPE_OUTER;
+		vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
 	else {
 		fprintf(stderr, "Unknown vlan type\n");
 		return;
@@ -4615,55 +4615,55 @@ csum_show(int port_id)
 	printf("Parse tunnel is %s\n",
 		(ports[port_id].parse_tunnel) ? "on" : "off");
 	printf("IP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
 	printf("UDP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
 	printf("TCP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
 	printf("SCTP checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
 	printf("Outer-Ip checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
 	printf("Outer-Udp checksum offload is %s\n",
-		(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
 
 	/* display warnings if configuration is not supported by the NIC */
 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
 	if (ret != 0)
 		return;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware UDP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware TCP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
 		fprintf(stderr,
 			"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
 			port_id);
 	}
-	if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 			== 0) {
 		fprintf(stderr,
 			"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@@ -4713,8 +4713,8 @@ cmd_csum_parsed(void *parsed_result,
 
 		if (!strcmp(res->proto, "ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_IPV4_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"IP checksum offload is not supported by port %u\n",
@@ -4722,8 +4722,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_UDP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"UDP checksum offload is not supported by port %u\n",
@@ -4731,8 +4731,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "tcp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_TCP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"TCP checksum offload is not supported by port %u\n",
@@ -4740,8 +4740,8 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "sctp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-						DEV_TX_OFFLOAD_SCTP_CKSUM)) {
-				csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+				csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"SCTP checksum offload is not supported by port %u\n",
@@ -4749,9 +4749,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-ip")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer IP checksum offload is not supported by port %u\n",
@@ -4759,9 +4759,9 @@ cmd_csum_parsed(void *parsed_result,
 			}
 		} else if (!strcmp(res->proto, "outer-udp")) {
 			if (hw == 0 || (dev_info.tx_offload_capa &
-					DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				csum_offloads |=
-						DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+						RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 			} else {
 				fprintf(stderr,
 					"Outer UDP checksum offload is not supported by port %u\n",
@@ -4916,7 +4916,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr, "Error: TSO is not supported by port %d\n",
 			res->port_id);
 		return;
@@ -4924,11 +4924,11 @@ cmd_tso_set_parsed(void *parsed_result,
 
 	if (ports[res->port_id].tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_TCP_TSO;
+						~RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO for non-tunneled packets is disabled\n");
 	} else {
 		ports[res->port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_TCP_TSO;
+						RTE_ETH_TX_OFFLOAD_TCP_TSO;
 		printf("TSO segment size for non-tunneled packets is %d\n",
 			ports[res->port_id].tso_segsz);
 	}
@@ -4940,7 +4940,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		return;
 
 	if ((ports[res->port_id].tso_segsz != 0) &&
-		(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+		(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
 		fprintf(stderr,
 			"Warning: TSO enabled but not supported by port %d\n",
 			res->port_id);
@@ -5011,27 +5011,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
 	if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
 		return dev_info;
 
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
 		fprintf(stderr,
 			"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		fprintf(stderr,
 			"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
-	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
 		fprintf(stderr,
 			"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
 			port_id);
@@ -5059,20 +5059,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 	dev_info = check_tunnel_tso_nic_support(res->port_id);
 	if (ports[res->port_id].tunnel_tso_segsz == 0) {
 		ports[res->port_id].dev_conf.txmode.offloads &=
-			~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_IP_TNL_TSO |
-			  DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 		printf("TSO for tunneled packets is disabled\n");
 	} else {
-		uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-					 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					 DEV_TX_OFFLOAD_IP_TNL_TSO |
-					 DEV_TX_OFFLOAD_UDP_TNL_TSO);
+		uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 
 		ports[res->port_id].dev_conf.txmode.offloads |=
 			(tso_offloads & dev_info.tx_offload_capa);
@@ -5095,7 +5095,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 			fprintf(stderr,
 				"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
 		if (!(ports[res->port_id].dev_conf.txmode.offloads &
-		      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+		      RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 			fprintf(stderr,
 				"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
 	}
@@ -7227,9 +7227,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
 		return;
 	}
 
-	if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		rx_fc_en = true;
-	if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+	if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
 		tx_fc_en = true;
 
 	printf("\n%s Flow control infos for port %-2d %s\n",
@@ -7507,12 +7507,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
-			{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	/* Partial command line, retrieve current configuration */
@@ -7525,11 +7525,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
 			return;
 		}
 
-		if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			rx_fc_en = 1;
-		if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
-		    (fc_conf.mode == RTE_FC_FULL))
+		if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+		    (fc_conf.mode == RTE_ETH_FC_FULL))
 			tx_fc_en = 1;
 	}
 
@@ -7597,12 +7597,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
 
 	/*
 	 * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
-	 * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+	 * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
 	 * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
-	 * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+	 * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
 	 */
 	static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
-		{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+		{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
 	};
 
 	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@@ -9250,13 +9250,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
 	int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
 	if (!strcmp(res->what,"rxmode")) {
 		if (!strcmp(res->mode, "AUPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
 		else if (!strcmp(res->mode, "ROPE"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
 		else if (!strcmp(res->mode, "BAM"))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
 		else if (!strncmp(res->mode, "MPE",3))
-			vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+			vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 	}
 
 	RTE_SET_USED(is_on);
@@ -9656,7 +9656,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
 	int ret;
 
 	tunnel_udp.udp_port = res->udp_port;
-	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+	tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 
 	if (!strcmp(res->what, "add"))
 		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@@ -9722,13 +9722,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
 	tunnel_udp.udp_port = res->udp_port;
 
 	if (!strcmp(res->tunnel_type, "vxlan")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
 	} else if (!strcmp(res->tunnel_type, "geneve")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
 	} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
 	} else if (!strcmp(res->tunnel_type, "ecpri")) {
-		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+		tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
 	} else {
 		fprintf(stderr, "Invalid tunnel type\n");
 		return;
@@ -11859,7 +11859,7 @@ cmd_set_macsec_offload_on_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
 #endif
@@ -11870,7 +11870,7 @@ cmd_set_macsec_offload_on_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MACSEC_INSERT;
+						RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
@@ -11956,7 +11956,7 @@ cmd_set_macsec_offload_off_parsed(
 	if (ret != 0)
 		return;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
 #ifdef RTE_NET_IXGBE
 		ret = rte_pmd_ixgbe_macsec_disable(port_id);
 #endif
@@ -11964,7 +11964,7 @@ cmd_set_macsec_offload_off_parsed(
 	switch (ret) {
 	case 0:
 		ports[port_id].dev_conf.txmode.offloads &=
-						~DEV_TX_OFFLOAD_MACSEC_INSERT;
+						~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 		cmd_reconfig_device_queue(port_id, 1, 1);
 		break;
 	case -ENODEV:
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index cad78350dcc9..a18871d461c4 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -86,62 +86,62 @@ static const struct {
 };
 
 const struct rss_type_info rss_type_table[] = {
-	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
-		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
-		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
-		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
 	{ "none", 0 },
-	{ "eth", ETH_RSS_ETH },
-	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
-	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
-	{ "vlan", ETH_RSS_VLAN },
-	{ "s-vlan", ETH_RSS_S_VLAN },
-	{ "c-vlan", ETH_RSS_C_VLAN },
-	{ "ipv4", ETH_RSS_IPV4 },
-	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
-	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
-	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
-	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
-	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
-	{ "ipv6", ETH_RSS_IPV6 },
-	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
-	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
-	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
-	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
-	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
-	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
-	{ "ipv6-ex", ETH_RSS_IPV6_EX },
-	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
-	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
-	{ "port", ETH_RSS_PORT },
-	{ "vxlan", ETH_RSS_VXLAN },
-	{ "geneve", ETH_RSS_GENEVE },
-	{ "nvgre", ETH_RSS_NVGRE },
-	{ "ip", ETH_RSS_IP },
-	{ "udp", ETH_RSS_UDP },
-	{ "tcp", ETH_RSS_TCP },
-	{ "sctp", ETH_RSS_SCTP },
-	{ "tunnel", ETH_RSS_TUNNEL },
+	{ "eth", RTE_ETH_RSS_ETH },
+	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+	{ "vlan", RTE_ETH_RSS_VLAN },
+	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
+	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
+	{ "ipv4", RTE_ETH_RSS_IPV4 },
+	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", RTE_ETH_RSS_IPV6 },
+	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+	{ "port", RTE_ETH_RSS_PORT },
+	{ "vxlan", RTE_ETH_RSS_VXLAN },
+	{ "geneve", RTE_ETH_RSS_GENEVE },
+	{ "nvgre", RTE_ETH_RSS_NVGRE },
+	{ "ip", RTE_ETH_RSS_IP },
+	{ "udp", RTE_ETH_RSS_UDP },
+	{ "tcp", RTE_ETH_RSS_TCP },
+	{ "sctp", RTE_ETH_RSS_SCTP },
+	{ "tunnel", RTE_ETH_RSS_TUNNEL },
 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
-	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
-	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
-	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
-	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
-	{ "esp", ETH_RSS_ESP },
-	{ "ah", ETH_RSS_AH },
-	{ "l2tpv3", ETH_RSS_L2TPV3 },
-	{ "pfcp", ETH_RSS_PFCP },
-	{ "pppoe", ETH_RSS_PPPOE },
-	{ "gtpu", ETH_RSS_GTPU },
-	{ "ecpri", ETH_RSS_ECPRI },
-	{ "mpls", ETH_RSS_MPLS },
-	{ "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
-	{ "l4-chksum", ETH_RSS_L4_CHKSUM },
+	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+	{ "esp", RTE_ETH_RSS_ESP },
+	{ "ah", RTE_ETH_RSS_AH },
+	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+	{ "pfcp", RTE_ETH_RSS_PFCP },
+	{ "pppoe", RTE_ETH_RSS_PPPOE },
+	{ "gtpu", RTE_ETH_RSS_GTPU },
+	{ "ecpri", RTE_ETH_RSS_ECPRI },
+	{ "mpls", RTE_ETH_RSS_MPLS },
+	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
 	{ NULL, 0 },
 };
 
@@ -538,39 +538,39 @@ static void
 device_infos_display_speeds(uint32_t speed_capa)
 {
 	printf("\n\tDevice speed capability:");
-	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
 		printf(" Autonegotiate (all speeds)");
-	if (speed_capa & ETH_LINK_SPEED_FIXED)
+	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
 		printf(" Disable autonegotiate (fixed speed)  ");
-	if (speed_capa & ETH_LINK_SPEED_10M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
 		printf(" 10 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_10M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
 		printf(" 10 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M_HD)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
 		printf(" 100 Mbps half-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_100M)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
 		printf(" 100 Mbps full-duplex  ");
-	if (speed_capa & ETH_LINK_SPEED_1G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
 		printf(" 1 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_2_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
 		printf(" 2.5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_5G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
 		printf(" 5 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_10G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
 		printf(" 10 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_20G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
 		printf(" 20 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_25G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
 		printf(" 25 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_40G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
 		printf(" 40 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_50G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
 		printf(" 50 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_56G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
 		printf(" 56 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_100G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
 		printf(" 100 Gbps  ");
-	if (speed_capa & ETH_LINK_SPEED_200G)
+	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
 		printf(" 200 Gbps  ");
 }
 
@@ -723,9 +723,9 @@ port_infos_display(portid_t port_id)
 
 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
-	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 	       ("full-duplex") : ("half-duplex"));
-	printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 	       ("On") : ("Off"));
 
 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
@@ -743,22 +743,22 @@ port_infos_display(portid_t port_id)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 	if (vlan_offload >= 0){
 		printf("VLAN offload: \n");
-		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
 			printf("  strip on, ");
 		else
 			printf("  strip off, ");
 
-		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
 			printf("filter on, ");
 		else
 			printf("filter off, ");
 
-		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
 			printf("extend on, ");
 		else
 			printf("extend off, ");
 
-		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
 			printf("qinq strip on\n");
 		else
 			printf("qinq strip off\n");
@@ -2953,8 +2953,8 @@ port_rss_reta_info(portid_t port_id,
 	}
 
 	for (i = 0; i < nb_entries; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
@@ -3427,7 +3427,7 @@ dcb_fwd_config_setup(void)
 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
 		fwd_lcores[lc_id]->stream_nb = 0;
 		fwd_lcores[lc_id]->stream_idx = sm_id;
-		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
 			/* if the nb_queue is zero, means this tc is
 			 * not enabled on the POOL
 			 */
@@ -4490,11 +4490,11 @@ vlan_extend_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	} else {
-		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4520,11 +4520,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4565,11 +4565,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	} else {
-		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4595,11 +4595,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 
 	if (on) {
-		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	} else {
-		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
-		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@@ -4669,7 +4669,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 		return;
 
 	if (ports[port_id].dev_conf.txmode.offloads &
-	    DEV_TX_OFFLOAD_QINQ_INSERT) {
+	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
 		return;
 	}
@@ -4678,7 +4678,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: vlan insert is not supported by port %d\n",
 			port_id);
@@ -4686,7 +4686,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	ports[port_id].tx_vlan_id = vlan_id;
 }
 
@@ -4705,7 +4705,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	if (ret != 0)
 		return;
 
-	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
 		fprintf(stderr,
 			"Error: qinq insert not supported by port %d\n",
 			port_id);
@@ -4713,8 +4713,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
 	}
 
 	tx_vlan_reset(port_id);
-	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
-						    DEV_TX_OFFLOAD_QINQ_INSERT);
+	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = vlan_id;
 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
 }
@@ -4723,8 +4723,8 @@ void
 tx_vlan_reset(portid_t port_id)
 {
 	ports[port_id].dev_conf.txmode.offloads &=
-				~(DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_QINQ_INSERT);
+				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
 	ports[port_id].tx_vlan_id = 0;
 	ports[port_id].tx_vlan_id_outer = 0;
 }
@@ -5130,7 +5130,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
 	ret = eth_link_get_nowait_print_err(port_id, &link);
 	if (ret < 0)
 		return 1;
-	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
 	    rate > link.link_speed) {
 		fprintf(stderr,
 			"Invalid rate value:%u bigger than link speed: %u\n",
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 090797318a35..75b24487e72e 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
-			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 				ol_flags |= PKT_TX_IP_CKSUM;
 			} else {
 				ipv4_hdr->hdr_checksum = 0;
@@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
-			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+			if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 				ol_flags |= PKT_TX_UDP_CKSUM;
 			} else {
 				udp_hdr->dgram_cksum = 0;
@@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		if (tso_segsz)
 			ol_flags |= PKT_TX_TCP_SEG;
-		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 			ol_flags |= PKT_TX_TCP_CKSUM;
 		} else {
 			tcp_hdr->cksum = 0;
@@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 			((char *)l3_hdr + info->l3_len);
 		/* sctp payload must be a multiple of 4 to be
 		 * offloaded */
-		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
 			((ipv4_hdr->total_length & 0x3) == 0)) {
 			ol_flags |= PKT_TX_SCTP_CKSUM;
 		} else {
@@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ipv4_hdr->hdr_checksum = 0;
 		ol_flags |= PKT_TX_OUTER_IPV4;
 
-		if (tx_offloads	& DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (tx_offloads	& RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
 		else
 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		ol_flags |= PKT_TX_TCP_SEG;
 
 	/* Skip SW outer UDP checksum generation if HW supports it */
-	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
 		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
 			udp_hdr->dgram_cksum
 				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 			    (tx_offloads &
-			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
 				m->outer_l2_len = info.outer_l2_len;
 				m->outer_l3_len = info.outer_l3_len;
 				m->l2_len = info.l2_len;
@@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					rte_be_to_cpu_16(info.outer_ethertype),
 					info.outer_l3_len);
 			/* dump tx packet info */
-			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					    DEV_TX_OFFLOAD_UDP_CKSUM |
-					    DEV_TX_OFFLOAD_TCP_CKSUM |
-					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
 				info.tso_segsz != 0)
 				printf("tx: m->l2_len=%d m->l3_len=%d "
 					"m->l4_len=%d\n",
 					m->l2_len, m->l3_len, m->l4_len);
 			if (info.is_tunnel == 1) {
 				if ((tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
 				    (tx_offloads &
-				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
 				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
 					printf("tx: m->outer_l2_len=%d "
 						"m->outer_l3_len=%d\n",
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 7ebed9fed334..03d026dec169 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -99,11 +99,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 	vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
 
 	tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags |= PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads	& DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index ee76df7f0323..57e00bca20e7 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 	fs->rx_packets += nb_rx;
 	txp = &ports[fs->tx_port];
 	tx_offloads = txp->dev_conf.txmode.offloads;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
diff --git a/app/test-pmd/macswap_common.h b/app/test-pmd/macswap_common.h
index 7e9a3590a436..7ade9a686b7c 100644
--- a/app/test-pmd/macswap_common.h
+++ b/app/test-pmd/macswap_common.h
@@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
 {
 	uint64_t ol_flags = 0;
 
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
 			PKT_TX_VLAN : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
 			PKT_TX_QINQ : 0;
-	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+	ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
 			PKT_TX_MACSEC : 0;
 
 	return ol_flags;
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index afc75f6bd213..cb40917077ea 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -547,29 +547,29 @@ parse_xstats_list(const char *in_str, struct rte_eth_xstat_name **xstats,
 static int
 parse_link_speed(int n)
 {
-	uint32_t speed = ETH_LINK_SPEED_FIXED;
+	uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
 
 	switch (n) {
 	case 1000:
-		speed |= ETH_LINK_SPEED_1G;
+		speed |= RTE_ETH_LINK_SPEED_1G;
 		break;
 	case 10000:
-		speed |= ETH_LINK_SPEED_10G;
+		speed |= RTE_ETH_LINK_SPEED_10G;
 		break;
 	case 25000:
-		speed |= ETH_LINK_SPEED_25G;
+		speed |= RTE_ETH_LINK_SPEED_25G;
 		break;
 	case 40000:
-		speed |= ETH_LINK_SPEED_40G;
+		speed |= RTE_ETH_LINK_SPEED_40G;
 		break;
 	case 50000:
-		speed |= ETH_LINK_SPEED_50G;
+		speed |= RTE_ETH_LINK_SPEED_50G;
 		break;
 	case 100000:
-		speed |= ETH_LINK_SPEED_100G;
+		speed |= RTE_ETH_LINK_SPEED_100G;
 		break;
 	case 200000:
-		speed |= ETH_LINK_SPEED_200G;
+		speed |= RTE_ETH_LINK_SPEED_200G;
 		break;
 	case 100:
 	case 10:
@@ -1002,13 +1002,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
 				if (!strcmp(optarg, "64K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_64K;
+						RTE_ETH_FDIR_PBALLOC_64K;
 				else if (!strcmp(optarg, "128K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_128K;
+						RTE_ETH_FDIR_PBALLOC_128K;
 				else if (!strcmp(optarg, "256K"))
 					fdir_conf.pballoc =
-						RTE_FDIR_PBALLOC_256K;
+						RTE_ETH_FDIR_PBALLOC_256K;
 				else
 					rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
 						 " must be: 64K or 128K or 256K\n",
@@ -1050,34 +1050,34 @@ launch_args_parse(int argc, char** argv)
 			}
 #endif
 			if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 			if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
-				rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 			if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
-				rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 			if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
-				rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-rx-timestamp"))
-				rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 			if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-filter"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-vlan-extend"))
-				rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 			if (!strcmp(lgopts[opt_idx].name,
 					"enable-hw-qinq-strip"))
-				rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+				rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 
 			if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
 				rx_drop_en = 1;
@@ -1099,13 +1099,13 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
 				set_pkt_forwarding_mode(optarg);
 			if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
-				rss_hf = ETH_RSS_IP;
+				rss_hf = RTE_ETH_RSS_IP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
-				rss_hf = ETH_RSS_UDP;
+				rss_hf = RTE_ETH_RSS_UDP;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
-				rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
-				rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+				rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 			if (!strcmp(lgopts[opt_idx].name, "rxq")) {
 				n = atoi(optarg);
 				if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@@ -1495,12 +1495,12 @@ launch_args_parse(int argc, char** argv)
 			if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
 				char *end = NULL;
 				n = strtoul(optarg, &end, 16);
-				if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+				if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
 					rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
 				else
 					rte_exit(EXIT_FAILURE,
 						 "rx-mq-mode must be >= 0 and <= %d\n",
-						 ETH_MQ_RX_VMDQ_DCB_RSS);
+						 RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
 			}
 			if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
 				record_core_cycles = 1;
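
As a companion sketch to parse_link_speed() above: the parsed bits end up in
rte_eth_conf.link_speeds, and OR-ing RTE_ETH_LINK_SPEED_FIXED with exactly one
speed bit disables autonegotiation (ret, port_id, nb_rxq and nb_txq are
assumed to be set up elsewhere):

    struct rte_eth_conf conf = { 0 };

    /* Force a fixed 10G link instead of autonegotiation. */
    conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
    ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
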
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 2b835a27bcd9..a66dfb297c65 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -349,7 +349,7 @@ uint64_t noisy_lkup_num_reads_writes;
 /*
  * Receive Side Scaling (RSS) configuration.
  */
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
 
 /*
  * Port topology configuration
@@ -460,12 +460,12 @@ lcoreid_t latencystats_lcore_id = -1;
 struct rte_eth_rxmode rx_mode;
 
 struct rte_eth_txmode tx_mode = {
-	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 };
 
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
 	.mode = RTE_FDIR_MODE_NONE,
-	.pballoc = RTE_FDIR_PBALLOC_64K,
+	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 	.status = RTE_FDIR_REPORT_STATUS,
 	.mask = {
 		.vlan_tci_mask = 0xFFEF,
@@ -524,7 +524,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 /*
  * hexadecimal bitmask of RX mq mode can be enabled.
  */
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
 
 /*
  * Used to set forced link speed
@@ -1578,9 +1578,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
 	if (ret != 0)
 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
 
-	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		port->dev_conf.txmode.offloads &=
-			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Apply Rx offloads configuration */
 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
@@ -1717,8 +1717,8 @@ init_config(void)
 
 	init_port_config();
 
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
 	/*
 	 * Records which Mbuf pool to use by each logical core, if needed.
 	 */
@@ -3466,7 +3466,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3769,17 +3769,17 @@ init_port_config(void)
 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
 				port->dev_conf.rxmode.mq_mode =
 					(enum rte_eth_rx_mq_mode)
-						(rx_mq_mode & ETH_MQ_RX_RSS);
+						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
 			} else {
-				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 				port->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 				for (i = 0;
 				     i < port->dev_info.nb_rx_queues;
 				     i++)
 					port->rx_conf[i].offloads &=
-						~DEV_RX_OFFLOAD_RSS_HASH;
+						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
 			}
 		}
 
@@ -3867,9 +3867,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		vmdq_rx_conf->enable_default_pool = 0;
 		vmdq_rx_conf->default_pool = 0;
 		vmdq_rx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 		vmdq_tx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 
 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3877,7 +3877,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 			vmdq_rx_conf->pool_map[i].pools =
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
@@ -3885,8 +3885,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3902,23 +3902,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
 	}
 
 	if (pfc_en)
 		eth_conf->dcb_capability_en =
-				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
 	else
-		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
 
 	return 0;
 }
@@ -3947,7 +3947,7 @@ init_port_dcb_config(portid_t pid,
 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	/* re-configure the device . */
 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@@ -3997,7 +3997,7 @@ init_port_dcb_config(portid_t pid,
 
 	rxtx_port_config(pid);
 	/* VLAN filter */
-	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
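
Condensed from get_eth_dcb_conf() above, a sketch of the non-VMDq DCB branch
with the new names (the conf variable and the 4-TC choice are illustrative
only):

    struct rte_eth_conf conf = { 0 };
    int i;

    conf.rxmode.mq_mode = RTE_ETH_MQ_RX_DCB;
    conf.txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
    conf.rx_adv_conf.dcb_rx_conf.nb_tcs = RTE_ETH_4_TCS;
    conf.tx_adv_conf.dcb_tx_conf.nb_tcs = RTE_ETH_4_TCS;
    /* Spread the 8 user priorities round-robin over the traffic classes. */
    for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
        conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % RTE_ETH_4_TCS;
        conf.tx_adv_conf.dcb_tx_conf.dcb_tc[i] = i % RTE_ETH_4_TCS;
    }
    conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
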
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 071e4e7d63a3..669ce1e87d79 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -493,7 +493,7 @@ extern lcoreid_t bitrate_lcore_id;
 extern uint8_t bitrate_enabled;
 #endif
 
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
 
 extern uint32_t max_rx_pkt_len;
 
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e45f8840c91c..9eb7992815e8 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -354,11 +354,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	tx_offloads = txp->dev_conf.txmode.offloads;
 	vlan_tci = txp->tx_vlan_id;
 	vlan_tci_outer = txp->tx_vlan_id_outer;
-	if (tx_offloads	& DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (tx_offloads	& RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		ol_flags |= PKT_TX_QINQ_PKT;
-	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
 		ol_flags |= PKT_TX_MACSEC;
 
 	/*
diff --git a/app/test/test_ethdev_link.c b/app/test/test_ethdev_link.c
index ee11987bae28..6248aea49abd 100644
--- a/app/test/test_ethdev_link.c
+++ b/app/test/test_ethdev_link.c
@@ -14,10 +14,10 @@ test_link_status_up_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -27,9 +27,9 @@ test_link_status_up_default(void)
 	TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
 		text, strlen(text), "Invalid default link status string");
 
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_FIXED;
-	link_status.link_speed = ETH_SPEED_NUM_10M,
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #2: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -37,7 +37,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -45,7 +45,7 @@ test_link_status_up_default(void)
 		text, strlen(text), "Invalid default link status "
 		"string with HDX");
 
-	link_status.link_speed = ETH_SPEED_NUM_NONE;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #3: %s\n", text);
 	RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@@ -54,9 +54,9 @@ test_link_status_up_default(void)
 		"string with HDX");
 
 	/* test max str len */
-	link_status.link_speed = ETH_SPEED_NUM_200G;
-	link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link_status.link_autoneg = ETH_LINK_AUTONEG;
+	link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+	link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
 	printf("Default link up #4:len = %d, %s\n", ret, text);
 	RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@@ -69,10 +69,10 @@ test_link_status_down_default(void)
 {
 	int ret = 0;
 	struct rte_eth_link link_status = {
-		.link_speed = ETH_SPEED_NUM_2_5G,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_speed = RTE_ETH_SPEED_NUM_2_5G,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -90,9 +90,9 @@ test_link_status_invalid(void)
 	int ret = 0;
 	struct rte_eth_link link_status = {
 		.link_speed = 55555,
-		.link_status = ETH_LINK_UP,
-		.link_autoneg = ETH_LINK_AUTONEG,
-		.link_duplex = ETH_LINK_FULL_DUPLEX
+		.link_status = RTE_ETH_LINK_UP,
+		.link_autoneg = RTE_ETH_LINK_AUTONEG,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
 	};
 	char text[RTE_ETH_LINK_MAX_STR_LEN];
 
@@ -116,21 +116,21 @@ test_link_speed_all_values(void)
 		const char *value;
 		uint32_t link_speed;
 	} speed_str_map[] = {
-		{ "None",   ETH_SPEED_NUM_NONE },
-		{ "10 Mbps",  ETH_SPEED_NUM_10M },
-		{ "100 Mbps", ETH_SPEED_NUM_100M },
-		{ "1 Gbps",   ETH_SPEED_NUM_1G },
-		{ "2.5 Gbps", ETH_SPEED_NUM_2_5G },
-		{ "5 Gbps",   ETH_SPEED_NUM_5G },
-		{ "10 Gbps",  ETH_SPEED_NUM_10G },
-		{ "20 Gbps",  ETH_SPEED_NUM_20G },
-		{ "25 Gbps",  ETH_SPEED_NUM_25G },
-		{ "40 Gbps",  ETH_SPEED_NUM_40G },
-		{ "50 Gbps",  ETH_SPEED_NUM_50G },
-		{ "56 Gbps",  ETH_SPEED_NUM_56G },
-		{ "100 Gbps", ETH_SPEED_NUM_100G },
-		{ "200 Gbps", ETH_SPEED_NUM_200G },
-		{ "Unknown",  ETH_SPEED_NUM_UNKNOWN },
+		{ "None",   RTE_ETH_SPEED_NUM_NONE },
+		{ "10 Mbps",  RTE_ETH_SPEED_NUM_10M },
+		{ "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+		{ "1 Gbps",   RTE_ETH_SPEED_NUM_1G },
+		{ "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+		{ "5 Gbps",   RTE_ETH_SPEED_NUM_5G },
+		{ "10 Gbps",  RTE_ETH_SPEED_NUM_10G },
+		{ "20 Gbps",  RTE_ETH_SPEED_NUM_20G },
+		{ "25 Gbps",  RTE_ETH_SPEED_NUM_25G },
+		{ "40 Gbps",  RTE_ETH_SPEED_NUM_40G },
+		{ "50 Gbps",  RTE_ETH_SPEED_NUM_50G },
+		{ "56 Gbps",  RTE_ETH_SPEED_NUM_56G },
+		{ "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+		{ "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+		{ "Unknown",  RTE_ETH_SPEED_NUM_UNKNOWN },
 		{ "Invalid",   50505 }
 	};
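
Outside the unit test, the same formatting helper is typically fed a live
link query; a sketch, assuming port_id refers to a started port:

    char text[RTE_ETH_LINK_MAX_STR_LEN];
    struct rte_eth_link link;

    /* Non-blocking query, then render the "Link up at ..." style text. */
    rte_eth_link_get_nowait(port_id, &link);
    rte_eth_link_to_str(text, sizeof(text), &link);
    printf("Port %u: %s\n", port_id, text);
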
 
diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index add4d8a67821..a09253e91814 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -103,7 +103,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 		.intr_conf = {
 			.rxq = 1,
@@ -118,7 +118,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
 {
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_NONE,
+			.mq_mode = RTE_ETH_MQ_RX_NONE,
 		},
 	};
 
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index 96733554b6c4..40ab0d5c4ca4 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
 
 static const struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 5388d18125a6..8a9ef851789f 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -134,11 +134,11 @@ static uint16_t vlan_id = 0x100;
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 189d2430f27e..351129de2f9b 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -107,11 +107,11 @@ static struct link_bonding_unittest_params test_params  = {
 
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index e7bb0497b663..f9eae9397386 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -52,7 +52,7 @@ struct slave_conf {
 
 	struct rte_eth_rss_conf rss_conf;
 	uint8_t rss_key[40];
-	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t is_slave;
 	struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
@@ -61,7 +61,7 @@ struct slave_conf {
 struct link_bonding_rssconf_unittest_params {
 	uint8_t bond_port_id;
 	struct rte_eth_dev_info bond_dev_info;
-	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
 	struct slave_conf slave_ports[SLAVE_COUNT];
 
 	struct rte_mempool *mbuf_pool;
@@ -80,27 +80,27 @@ static struct link_bonding_rssconf_unittest_params test_params  = {
  */
 static struct rte_eth_conf default_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
 static struct rte_eth_conf rss_pmd_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IPV6,
+			.rss_hf = RTE_ETH_RSS_IPV6,
 		},
 	},
 	.lpbk_mode = 0,
@@ -207,13 +207,13 @@ bond_slaves(void)
 static int
 reta_set(uint16_t port_id, uint8_t value, int reta_size)
 {
-	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
 	int i, j;
 
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
 		/* select all fields to set */
 		reta_conf[i].mask = ~0LL;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			reta_conf[i].reta[j] = value;
 	}
 
@@ -232,8 +232,8 @@ reta_check_synced(struct slave_conf *port)
 	for (i = 0; i < test_params.bond_dev_info.reta_size;
 			i++) {
 
-		int index = i / RTE_RETA_GROUP_SIZE;
-		int shift = i % RTE_RETA_GROUP_SIZE;
+		int index = i / RTE_ETH_RETA_GROUP_SIZE;
+		int shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (port->reta_conf[index].reta[shift] !=
 				test_params.bond_reta_conf[index].reta[shift])
@@ -251,7 +251,7 @@ static int
 bond_reta_fetch(void) {
 	unsigned j;
 
-	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
+	for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
 			j++)
 		test_params.bond_reta_conf[j].mask = ~0LL;
 
@@ -268,7 +268,7 @@ static int
 slave_reta_fetch(struct slave_conf *port) {
 	unsigned j;
 
-	for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
+	for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
 		port->reta_conf[j].mask = ~0LL;
 
 	TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
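
The reta_set() helper above stops short of the actual update call in this
hunk; for reference, a sketch of programming every RETA entry to queue 0 with
the renamed group-size macro (port_id assumed configured, error handling
elided):

    struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
    struct rte_eth_dev_info dev_info;
    int i, j;

    rte_eth_dev_info_get(port_id, &dev_info);
    for (i = 0; i < dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
        reta_conf[i].mask = ~0ULL;       /* write all 64 entries in the group */
        for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
            reta_conf[i].reta[j] = 0;    /* steer everything to queue 0 */
    }
    rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
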
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index a3b4f52c65e6..1df86ce080e5 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -62,11 +62,11 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 1,  /* enable loopback */
 };
@@ -155,7 +155,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -822,7 +822,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		/* bulk alloc rx, full-featured tx */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "hybrid")) {
 		/* bulk alloc rx, vector tx
@@ -831,13 +831,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
 		 */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 		return 0;
 	} else if (!strcmp(mode, "full")) {
 		/* full feature rx,tx pair */
 		tx_conf.tx_rs_thresh = 32;
 		tx_conf.tx_free_thresh = 32;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		return 0;
 	}
 
diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7e15b47eb0fb..d9f2e4f66bde 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -53,7 +53,7 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
 		rte_pktmbuf_free(pkt);
@@ -168,7 +168,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
 		int wait_to_complete __rte_unused)
 {
 	if (!bonded_eth_dev->data->dev_started)
-		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -562,9 +562,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 	eth_dev->data->nb_rx_queues = (uint16_t)1;
 	eth_dev->data->nb_tx_queues = (uint16_t)1;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL)
diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 53560d3830d7..1c0ea988f239 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue configuration.
 - HW managed event vectorization on CN10K for packets enqueued from ethdev to
diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index 11fbebfcd243..0fa57abfa3e0 100644
--- a/doc/guides/eventdevs/octeontx2.rst
+++ b/doc/guides/eventdevs/octeontx2.rst
@@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
 - HW managed packets enqueued from ethdev to eventdev exposed through event eth
   RX adapter.
 - N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
   capability while maintaining receive packet order.
 - Full Rx/Tx offload support defined through ethdev queue config.
 
diff --git a/doc/guides/nics/af_packet.rst b/doc/guides/nics/af_packet.rst
index bdd6e7263c85..54feffdef4bd 100644
--- a/doc/guides/nics/af_packet.rst
+++ b/doc/guides/nics/af_packet.rst
@@ -70,5 +70,5 @@ Features and Limitations
 ------------------------
 
 The PMD will re-insert the VLAN tag transparently to the packet if the kernel
-strips it, as long as the ``DEV_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
+strips it, as long as the ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
 application.
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index aa6032889a55..b3d10f30dc77 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -877,21 +877,21 @@ processing. This improved performance is derived from a number of optimizations:
     * TX: only the following reduced set of transmit offloads is supported in
       vector mode::
 
-       DEV_TX_OFFLOAD_MBUF_FAST_FREE
+       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 
     * RX: only the following reduced set of receive offloads is supported in
       vector mode (note that jumbo MTU is allowed only when the MTU setting
-      does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
-       DEV_RX_OFFLOAD_VLAN_STRIP
-       DEV_RX_OFFLOAD_KEEP_CRC
-       DEV_RX_OFFLOAD_IPV4_CKSUM
-       DEV_RX_OFFLOAD_UDP_CKSUM
-       DEV_RX_OFFLOAD_TCP_CKSUM
-       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-       DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-       DEV_RX_OFFLOAD_RSS_HASH
-       DEV_RX_OFFLOAD_VLAN_FILTER
+      does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+       RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+       RTE_ETH_RX_OFFLOAD_KEEP_CRC
+       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+       RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+       RTE_ETH_RX_OFFLOAD_RSS_HASH
+       RTE_ETH_RX_OFFLOAD_VLAN_FILTER
 
 The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
 vector processing is made at run-time when the port is started; if no transmit
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 91bdcd065a95..0209730b904a 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -432,7 +432,7 @@ Limitations
 .. code-block:: console
 
      vlan_offload = rte_eth_dev_get_vlan_offload(port);
-     vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+     vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
 Another alternative is modify the adapter's ingress VLAN rewrite mode so that
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index d35751d5b5a7..594e98a6b803 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -30,7 +30,7 @@ Speed capabilities
 
 Supports getting the speed capabilities that the current device is capable of.
 
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
 * **[related]  API**: ``rte_eth_dev_info_get()``.
 
 
@@ -101,11 +101,11 @@ Supports Rx interrupts.
 Lock-free Tx queue
 ------------------
 
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises the RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capability, multiple threads can
 invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
 
-* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses]    rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
 * **[related]  API**: ``rte_eth_tx_burst()``.
 
 
@@ -117,8 +117,8 @@ Fast mbuf free
 Supports optimization for fast release of mbufs following successful Tx.
 Requires that per queue, all mbufs come from the same mempool and has refcnt = 1.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
 
 
 .. _nic_features_free_tx_mbuf_on_demand:
@@ -177,7 +177,7 @@ Scattered Rx
 
 Supports receiving segmented mbufs.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
 * **[implements] datapath**: ``Scattered Rx function``.
 * **[implements] rte_eth_dev_data**: ``scattered_rx``.
 * **[provides]   eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -205,12 +205,12 @@ LRO
 
 Supports Large Receive Offload.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
   ``dev_conf.rxmode.max_lro_pkt_size``.
 * **[implements] datapath**: ``LRO functionality``.
 * **[implements] rte_eth_dev_data**: ``lro``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
 * **[provides]   rte_eth_dev_info**: ``max_lro_pkt_size``.
 
 
@@ -221,12 +221,12 @@ TSO
 
 Supports TCP Segmentation Offloading.
 
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
 * **[uses]       rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
 * **[uses]       mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
 * **[implements] datapath**: ``TSO functionality``.
-* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides]   rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
 
 
 .. _nic_features_promiscuous_mode:
@@ -287,9 +287,9 @@ RSS hash
 
 Supports RSS hashing on RX.
 
-* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses]     user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
 * **[uses]     user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
@@ -302,7 +302,7 @@ Inner RSS
 Supports RX RSS hashing on Inner headers.
 
 * **[uses]    rte_flow_action_rss**: ``level``.
-* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
 
 
@@ -339,7 +339,7 @@ VMDq
 
 Supports Virtual Machine Device Queues (VMDq).
 
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
 * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -362,7 +362,7 @@ DCB
 
 Supports Data Center Bridging (DCB).
 
-* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses]       user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
 * **[uses]       user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
 * **[uses]       user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@@ -378,7 +378,7 @@ VLAN filter
 
 Supports filtering of a VLAN Tag identifier.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
 * **[implements] eth_dev_ops**: ``vlan_filter_set``.
 * **[related]    API**: ``rte_eth_dev_vlan_filter()``.
 
@@ -416,13 +416,13 @@ Supports inline crypto processing defined by rte_security library to perform cry
 operations of security protocol while packet is received in NIC. NIC is not aware
 of protocol operations. See Security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@@ -438,14 +438,14 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
 packet is received at NIC. The NIC is capable of understanding the security
 protocol operations. See security library and PMD documentation for more details.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[uses]       mbuf**: ``mbuf.l2_len``.
 * **[implements] rte_security_ops**: ``session_create``, ``session_update``,
   ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
   ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
   ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
 * **[provides]   rte_security_ops, capabilities_get**:  ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@@ -459,7 +459,7 @@ CRC offload
 Supports CRC stripping by hardware.
 A PMD assumed to support CRC stripping by default. PMD should advertise if it supports keeping CRC.
 
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
 
 
 .. _nic_features_vlan_offload:
@@ -469,13 +469,13 @@ VLAN offload
 
 Supports VLAN offload to hardware.
 
-* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses]       rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses]       rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[uses]       mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
 * **[implements] eth_dev_ops**: ``vlan_offload_set``.
 * **[provides]   mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides]   rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
 * **[related]    API**: ``rte_eth_dev_set_vlan_offload()``,
   ``rte_eth_dev_get_vlan_offload()``.
 
@@ -487,14 +487,14 @@ QinQ offload
 
 Supports QinQ (queue in queue) offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
   ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
   ``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
 
 
 .. _nic_features_fec:
@@ -508,7 +508,7 @@ information to correct the bit errors generated during data packet transmission
 improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
 
 * **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides]   rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides]   rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
 * **[related]    API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
 
 
@@ -519,16 +519,16 @@ L3 checksum offload
 
 Supports L3 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
 * **[uses]     mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
   ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
   ``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
 
 
 .. _nic_features_l4_checksum_offload:
@@ -538,8 +538,8 @@ L4 checksum offload
 
 Supports L4 checksum offload.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
   ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -547,8 +547,8 @@ Supports L4 checksum offload.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
   ``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
 
 .. _nic_features_hw_timestamp:
 
@@ -557,10 +557,10 @@ Timestamp offload
 
 Supports Timestamp.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
 * **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
 * **[related] eth_dev_ops**: ``read_clock``.
 
 .. _nic_features_macsec_offload:
@@ -570,11 +570,11 @@ MACsec offload
 
 Supports MACsec.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
 
 
 .. _nic_features_inner_l3_checksum:
@@ -584,16 +584,16 @@ Inner L3 checksum
 
 Supports inner packet L3 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
   ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
 
 
 .. _nic_features_inner_l4_checksum:
@@ -603,15 +603,15 @@ Inner L4 checksum
 
 Supports inner packet L4 checksum.
 
-* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
   ``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses]     rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
   ``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
 * **[uses]     mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
-  ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+  ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
 
 
 .. _nic_features_shared_rx_queue:
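
Taken together, the [uses]/[provides] entries above follow one pattern: an
application sets an RTE_ETH_RX/TX_OFFLOAD_* bit only after checking the
advertised capability. A generic sketch (port_id assumed, error handling
elided):

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf = { 0 };

    rte_eth_dev_info_get(port_id, &dev_info);
    conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
    /* Request hash delivery only when the device advertises it. */
    if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
    rte_eth_dev_configure(port_id, 1, 1, &conf);
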
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index ed6afd62703d..bba53f5a64ee 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
 To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
 will be checked:
 
-*   ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+*   ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
 
-*   ``DEV_RX_OFFLOAD_CHECKSUM``
+*   ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
 
-*   ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+*   ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
 
 *   ``fdir_conf->mode``
 
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 2efdd1a41bb4..a1e236ad75e5 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -216,21 +216,21 @@ For example,
     *   If the max number of VFs (max_vfs) is set in the range of 1 to 32:
 
         If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
 
         If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are totally 32
-        pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+        pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
 
     *   If the max number of VFs (max_vfs) is in the range of 33 to 64:
 
         If the number of Rx queues in specified as 4 (``--rxq=4`` in testpmd), then error message is expected
         as ``rxq`` is not correct at this case;
 
-        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+        If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are totally 64 pools (RTE_ETH_64_POOLS),
         and each VF have 2 Rx queues;
 
-    On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
-    or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+    On the host, to enable VF RSS functionality, the Rx mq mode should be set to
+    RTE_ETH_MQ_RX_VMDQ_RSS or RTE_ETH_MQ_RX_RSS, and SR-IOV mode should be activated (max_vfs >= 1).
     It also needs config VF RSS information like hash function, RSS key, RSS key length.
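
A sketch of the host-side PF configuration the paragraph above describes
(the values are illustrative; the RSS key and key-length fields may also be
filled in):

    struct rte_eth_conf conf = { 0 };

    /* VMDq + RSS so each VF gets RSS across its own queue set. */
    conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
    conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
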
 
 .. note::
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 20a74b9b5bcd..148d2f5fc2be 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -89,13 +89,13 @@ Other features are supported using optional MACRO configuration. They include:
 
 To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
 
-*   DEV_RX_OFFLOAD_VLAN_STRIP
+*   RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 
-*   DEV_RX_OFFLOAD_VLAN_EXTEND
+*   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
 
-*   DEV_RX_OFFLOAD_CHECKSUM
+*   RTE_ETH_RX_OFFLOAD_CHECKSUM
 
-*   DEV_RX_OFFLOAD_HEADER_SPLIT
+*   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
 
 *   dev_conf
 
@@ -163,13 +163,13 @@ l3fwd
 ~~~~~
 
 When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
 Otherwise, by default, RX vPMD is disabled.
 
 load_balancer
 ~~~~~~~~~~~~~
 
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
 In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
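
In both cases the guard is the same single bit; a sketch of keeping the Rx
vPMD enabled, assuming port_conf is the application's rte_eth_conf:

    /* The vector Rx path stays enabled only while checksum offloads are off. */
    port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_CHECKSUM;
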
 
 
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index dd059b227d8e..86927a0b56b0 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -371,7 +371,7 @@ Limitations
 
 - CRC:
 
-  - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+  - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
@@ -611,7 +611,7 @@ Driver options
   small-packet traffic.
 
   When MPRQ is enabled, MTU can be larger than the size of
-  user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+  user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. PMD will
   configure large stride size enough to accommodate MTU as long as
   device allows. Note that this can waste system memory compared to enabling Rx
   scatter and multi-segment packet.
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 3ce696b605d1..681010d9ed7d 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -275,7 +275,7 @@ An example utility for eBPF instruction generation in the format of C arrays wil
 be added in next releases
 
 TAP reports on supported RSS functions as part of dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
 **Known limitation:** TAP supports all of the above hash functions together
 and not in partial combinations.
 
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 7bff0aef0b74..9b2c31a2f0bc 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
 
    - the bit mask of required GSO types. The GSO library uses the same macros as
      those that describe a physical device's TX offloading capabilities (i.e.
-     ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+     ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
      wants to segment TCP/IPv4 packets, it should set gso_types to
-     ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
-     supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
-     ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+     ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+     supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+     ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
      allowed.
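
A sketch of the gso_types setup described above (the mempool variables and the
1400-byte segment size are illustrative, not prescribed by the library):

    struct rte_gso_ctx gso_ctx;             /* from rte_gso.h */

    gso_ctx.direct_pool = direct_pool;      /* assumed pre-created mempools */
    gso_ctx.indirect_pool = indirect_pool;
    gso_ctx.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO |
                        RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
    gso_ctx.gso_size = 1400;                /* max payload per output segment */
    gso_ctx.flag = RTE_GSO_FLAG_IPID_FIXED; /* fixed IPv4 ID in all segments */
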
 
    - a flag, that indicates whether the IPv4 headers of output segments should
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 2f190b40e43a..dc6186a44ae2 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
     mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM
     set out_ip checksum to 0 in the packet
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
 
 - calculate checksum of out_ip and out_udp::
 
@@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
     set out_ip checksum to 0 in the packet
     set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
-  and DEV_TX_OFFLOAD_UDP_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+  and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
 
 - calculate checksum of in_ip::
 
@@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
 
   This is similar to case 1), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of in_ip and in_tcp::
@@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
   This is similar to case 2), but l2_len is different. It is supported
-  on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
-  DEV_TX_OFFLOAD_TCP_CKSUM.
+  on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+  RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
   Note that it can only work if outer L4 checksum is 0.
 
 - segment inner TCP::
@@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
     set in_tcp checksum to pseudo header without including the IP
       payload length using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
   Note that it can only work if outer L4 checksum is 0.
 
 - calculate checksum of out_ip, in_ip, in_tcp::
@@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
     set in_ip checksum to 0 in the packet
     set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
 
-  This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
-  DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+  This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+  RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
 
 The list of flags and their precise meaning is described in the mbuf API
 documentation (rte_mbuf.h). Also refer to the testpmd source code
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 0d4ac77a7ccf..68312898448c 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
 
 Avoiding lock contention is a key issue in a multi-core environment.
 To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
 In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
 
 To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
 
 Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
 
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
 concurrently on the same tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
 
 *  Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
@@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
 *  In the eventdev use case, avoid dedicating a separate TX core for transmitting and thus
    enable more scaling as all workers can send the packets.
 
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
 
 Device Identification, Ownership and Configuration
 --------------------------------------------------
@@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
 The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
 Any requested offloading by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
 ``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
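
For reference, a minimal sketch of probing and requesting
``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` with the renamed flags; the queue counts
here are illustrative:

#include <rte_ethdev.h>

/* Sketch: enable lock-free Tx only when the device advertises it;
 * offloads outside [rt]x_offload_capa would be rejected. */
static int
configure_with_lockfree_tx(uint16_t port_id)
{
	struct rte_eth_conf conf = {0};
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}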
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index a2169517c3f9..d798adb83e1d 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1993,23 +1993,23 @@ only matching traffic goes through.
 
 .. table:: RSS
 
-   +---------------+---------------------------------------------+
-   | Field         | Value                                       |
-   +===============+=============================================+
-   | ``func``      | RSS hash function to apply                  |
-   +---------------+---------------------------------------------+
-   | ``level``     | encapsulation level for ``types``           |
-   +---------------+---------------------------------------------+
-   | ``types``     | specific RSS hash types (see ``ETH_RSS_*``) |
-   +---------------+---------------------------------------------+
-   | ``key_len``   | hash key length in bytes                    |
-   +---------------+---------------------------------------------+
-   | ``queue_num`` | number of entries in ``queue``              |
-   +---------------+---------------------------------------------+
-   | ``key``       | hash key                                    |
-   +---------------+---------------------------------------------+
-   | ``queue``     | queue indices to use                        |
-   +---------------+---------------------------------------------+
+   +---------------+-------------------------------------------------+
+   | Field         | Value                                           |
+   +===============+=================================================+
+   | ``func``      | RSS hash function to apply                      |
+   +---------------+-------------------------------------------------+
+   | ``level``     | encapsulation level for ``types``               |
+   +---------------+-------------------------------------------------+
+   | ``types``     | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+   +---------------+-------------------------------------------------+
+   | ``key_len``   | hash key length in bytes                        |
+   +---------------+-------------------------------------------------+
+   | ``queue_num`` | number of entries in ``queue``                  |
+   +---------------+-------------------------------------------------+
+   | ``key``       | hash key                                        |
+   +---------------+-------------------------------------------------+
+   | ``queue``     | queue indices to use                            |
+   +---------------+-------------------------------------------------+
 
 Action: ``PF``
 ^^^^^^^^^^^^^^
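
For reference, a minimal sketch filling the RSS action fields listed above
with the renamed hash types; the queue indices and hash types are
illustrative:

#include <rte_common.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1 };

static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,                     /* outermost encapsulation */
	.types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	.key_len = 0,                   /* keep the device default key */
	.queue_num = RTE_DIM(rss_queues),
	.key = NULL,
	.queue = rss_queues,
};

static const struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_action },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};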
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index ad92c16868c1..46c9b51d1bf9 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -569,7 +569,7 @@ created by the application is attached to the security session by the API
 
 For Inline Crypto and Inline protocol offload, device specific defined metadata is
 updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
 
 For inline protocol offloaded ingress traffic, the application can register a
 pointer, ``userdata`` , in the security session. When the packet is received,
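
For reference, a minimal sketch of the metadata step above with the renamed
capability flag; ``ctx`` and ``sess`` are assumed to come from the usual
session creation flow:

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Sketch: attach device-specific metadata to an outbound mbuf only
 * when the port requires it. */
static int
attach_sec_metadata(uint16_t port_id, struct rte_security_ctx *ctx,
		    struct rte_security_session *sess, struct rte_mbuf *m)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return -1;

	m->ol_flags |= PKT_TX_SEC_OFFLOAD; /* mark for inline processing */

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA)
		return rte_security_set_pkt_metadata(ctx, sess, m, NULL);

	return 0; /* device does not need per-packet metadata */
}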
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index cc2b89850b07..f11550dc78ac 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -69,22 +69,16 @@ Deprecation Notices
   ``RTE_ETH_FLOW_MAX`` is one sample of the mentioned case, adding a new flow
   type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
   usage in following public struct hierarchy:
-  ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+  ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
   Need to identify this kind of usage and fix it in 20.11, otherwise this blocks
   us from extending existing enums/defines.
   One solution can be using a fixed size array instead of ``.*MAX.*`` value.
 
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
-  Macros will be added for backward compatibility.
-  Backward compatibility macros will be removed on v22.11.
-  A few old backward compatibility macros from 2013 that does not have
-  proper prefix will be removed on v21.11.
-
 * ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
   and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
   will be removed in DPDK 20.11.
 
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
   This will allow application to enable or disable PMDs from updating
   ``rte_mbuf::hash::fdir``.
   This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 569d3c00b9ee..b327c2bfca1c 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -446,6 +446,9 @@ ABI Changes
 * bbdev: Added capability related to more comprehensive CRC options,
   shifting values of the ``enum rte_bbdev_op_ldpcdec_flag_bitmasks``.
 
+* ethdev: All enums & macros updated to have ``RTE_ETH`` prefix and structures
+  updated to have ``rte_eth`` prefix. DPDK components updated to use new names.
+
 
 Known Issues
 ------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 78171b25f96e..782574dd39d5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -209,12 +209,12 @@ Where:
     device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
 
 *   ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
     allows the user to disable some of the RX HW offload capabilities.
     By default all HW RX offloads are enabled.
 
 *   ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
-    (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+    (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
     allows the user to disable some of the TX HW offload capabilities.
     By default all HW TX offloads are enabled.
 
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index d23e0b6a7a2e..30edef07ea20 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -546,7 +546,7 @@ The command line options are:
     Set the hexadecimal bitmask of RX multi queue mode which can be enabled.
     The default value is 0x7::
 
-       ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+       RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
 
 *   ``--record-core-cycles``
 
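For reference, the documented default of 0x7 is simply the OR of the three
Rx multi-queue flags; a sketch with the flag values from rte_ethdev.h (the
macro name is illustrative):

/* RTE_ETH_MQ_RX_RSS_FLAG (0x1) | RTE_ETH_MQ_RX_DCB_FLAG (0x2) |
 * RTE_ETH_MQ_RX_VMDQ_FLAG (0x4) == 0x7, the documented default. */
#define DEFAULT_RX_MQ_MASK (RTE_ETH_MQ_RX_RSS_FLAG | \
			    RTE_ETH_MQ_RX_DCB_FLAG | \
			    RTE_ETH_MQ_RX_VMDQ_FLAG)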
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
index be52e6f72dab..a922988607ef 100644
--- a/drivers/bus/dpaa/include/process.h
+++ b/drivers/bus/dpaa/include/process.h
@@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
 struct usdpaa_ioctl_link_status_args_old {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
-	/* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+	/* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
 	int     link_autoneg;
 
 };
@@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
 struct usdpaa_ioctl_update_link_status_args {
 	/* network device node name */
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link status(ETH_LINK_UP/DOWN) */
+	/* link status(RTE_ETH_LINK_UP/DOWN) */
 	int     link_status;
 };
 
 struct usdpaa_ioctl_update_link_speed {
 	/* network device node name*/
 	char    if_name[IF_NAME_MAX_LEN];
-	/* link speed (ETH_SPEED_NUM_)*/
+	/* link speed (RTE_ETH_SPEED_NUM_)*/
 	int     link_speed;
-	/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+	/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
 	int     link_duplex;
 };
 
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index ef85073b17e1..e13d55713625 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -167,7 +167,7 @@ enum roc_npc_rss_hash_function {
 struct roc_npc_action_rss {
 	enum roc_npc_rss_hash_function func;
 	uint32_t level;
-	uint64_t types;	       /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types;	       /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len;      /**< Hash key length in bytes. */
 	uint32_t queue_num;    /**< Number of entries in @p queue. */
 	const uint8_t *key;    /**< Hash key. */
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index a077376dc0fb..8f778f0c2419 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -93,10 +93,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@@ -290,7 +290,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -320,7 +320,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		internals->tx_queue[i].sockfd = -1;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -331,7 +331,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
 	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	return 0;
 }
 
@@ -346,9 +346,9 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
 	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index b362ccdcd38c..e156246f24df 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -163,10 +163,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
@@ -652,7 +652,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -661,7 +661,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 377299b14c7a..b618cba3f023 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
 		.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
 
 	/* ARK PMD supports all line rates, how do we indicate that here ?? */
-	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
-				ETH_LINK_SPEED_10G |
-				ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G);
-
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+	dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+				RTE_ETH_LINK_SPEED_10G |
+				RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G);
+
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return 0;
 }
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 5a198f53fce7..f7bfac796c07 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -154,20 +154,20 @@ static struct rte_pci_driver rte_atl_pmd = {
 	.remove = eth_atl_pci_remove,
 };
 
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
-			| DEV_RX_OFFLOAD_IPV4_CKSUM \
-			| DEV_RX_OFFLOAD_UDP_CKSUM \
-			| DEV_RX_OFFLOAD_TCP_CKSUM \
-			| DEV_RX_OFFLOAD_MACSEC_STRIP \
-			| DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
-			| DEV_TX_OFFLOAD_IPV4_CKSUM \
-			| DEV_TX_OFFLOAD_UDP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_CKSUM \
-			| DEV_TX_OFFLOAD_TCP_TSO \
-			| DEV_TX_OFFLOAD_MACSEC_INSERT \
-			| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
+			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define SFP_EEPROM_SIZE 0x100
 
@@ -488,7 +488,7 @@ atl_dev_start(struct rte_eth_dev *dev)
 	/* set adapter started */
 	hw->adapter_stopped = 0;
 
-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(ERR,
 		"Invalid link_speeds for port %u, fix speed not supported",
 				dev->data->port_id);
@@ -655,18 +655,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
 	uint32_t speed_mask = 0;
 
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
 	} else {
-		if (link_speeds & ETH_LINK_SPEED_10G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed_mask |= AQ_NIC_RATE_10G;
-		if (link_speeds & ETH_LINK_SPEED_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed_mask |= AQ_NIC_RATE_5G;
-		if (link_speeds & ETH_LINK_SPEED_1G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed_mask |= AQ_NIC_RATE_1G;
-		if (link_speeds & ETH_LINK_SPEED_2_5G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed_mask |=  AQ_NIC_RATE_2G5;
-		if (link_speeds & ETH_LINK_SPEED_100M)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed_mask |= AQ_NIC_RATE_100M;
 	}
 
@@ -1127,10 +1127,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
-	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 
 	return 0;
 }
@@ -1175,10 +1175,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 	u32 fc = AQ_NIC_FC_OFF;
 	int err = 0;
 
-	link.link_status = ETH_LINK_DOWN;
+	link.link_status = RTE_ETH_LINK_DOWN;
 	link.link_speed = 0;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 	memset(&old, 0, sizeof(old));
 
 	/* load old link status */
@@ -1198,8 +1198,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
 		return 0;
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = hw->aq_link_status.mbps;
 
 	rte_eth_linkstatus_set(dev, &link);
@@ -1333,7 +1333,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -1532,13 +1532,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	hw->aq_fw_ops->get_flow_control(hw, &fc);
 
 	if (fc == AQ_NIC_FC_OFF)
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (fc & AQ_NIC_FC_RX)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (fc & AQ_NIC_FC_TX)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 
 	return 0;
 }
@@ -1553,13 +1553,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (hw->aq_fw_ops->set_flow_control == NULL)
 		return -ENOTSUP;
 
-	if (fc_conf->mode == RTE_FC_NONE)
+	if (fc_conf->mode == RTE_ETH_FC_NONE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
-	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
-	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
-	else if (fc_conf->mode == RTE_FC_FULL)
+	else if (fc_conf->mode == RTE_ETH_FC_FULL)
 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
 
 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
@@ -1727,14 +1727,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
 
-	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
 
-	if (mask & ETH_VLAN_EXTEND_MASK)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
 		ret = -ENOTSUP;
 
 	return ret;
@@ -1750,10 +1750,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 	PMD_INIT_FUNC_TRACE();
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
 		break;
 	default:
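
For reference, a minimal application-side sketch of driving the flow-control
mode mapping above with the renamed ``RTE_ETH_FC_*`` values:

#include <rte_ethdev.h>

/* Sketch: request full Rx/Tx pause on a port; the driver maps
 * RTE_ETH_FC_FULL to its hardware bits as shown above. */
static int
enable_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}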
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
index fbc9917ed30d..ed9ef9f0cc52 100644
--- a/drivers/net/atlantic/atl_ethdev.h
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -11,15 +11,15 @@
 #include "hw_atl/hw_atl_utils.h"
 
 #define ATL_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define ATL_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct atl_adapter *)adapter)->hw)
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 0d3460383a50..2ff426892df2 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
 	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_IPV4_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-		(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
 	/* allocate memory for the software ring */
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 932ec90265cf..5d94db02c506 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1998,9 +1998,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	/* Setup required number of queues */
 	_avp_set_queue_counts(eth_dev);
 
-	mask = (ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+	mask = (RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 	ret = avp_vlan_offload_set(eth_dev, mask);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2140,8 +2140,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
-	link->link_speed = ETH_SPEED_NUM_10G;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_speed = RTE_ETH_SPEED_NUM_10G;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_status = !!(avp->flags & AVP_F_LINKUP);
 
 	return -1;
@@ -2191,8 +2191,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
 	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
 	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	}
 
 	return 0;
@@ -2205,9 +2205,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	uint64_t offloads = dev_conf->rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2216,13 +2216,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
 
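For reference, a minimal sketch of toggling VLAN stripping from the
application side with the renamed mask bits:

#include <rte_ethdev.h>

/* Sketch: read the current VLAN offload mode and enable stripping;
 * the driver's vlan_offload_set hook above sees the updated mask. */
static int
enable_vlan_strip(uint16_t port_id)
{
	int mode = rte_eth_dev_get_vlan_offload(port_id);

	if (mode < 0)
		return mode;
	mode |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mode);
}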
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index ca32ad641873..3aaa2193272f 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
 	pdata->rss_hf = rss_conf->rss_hf;
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
-	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 }
 
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 0250256830ac..dab0c6775d1d 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
 	struct axgbe_port *pdata =  dev->data->dev_private;
 	/* Checksum offload to hardware */
 	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_CHECKSUM;
+				RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return 0;
 }
 
@@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		pdata->rss_enable = 1;
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		pdata->rss_enable = 0;
 	else
 		return  -1;
@@ -385,7 +385,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
 
 	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 				max_pkt_len > pdata->rx_buf_size)
 		dev_data->scattered_rx = 1;
 
@@ -521,8 +521,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		pdata->rss_table[i] = reta_conf[idx].reta[shift];
@@ -552,8 +552,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
 			continue;
 		reta_conf[idx].reta[shift] = pdata->rss_table[i];
@@ -590,13 +590,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
 
-	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	if (pdata->rss_hf &
-	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Set the RSS options */
@@ -765,7 +765,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
 	link.link_status = pdata->phy_link;
 	link.link_speed = pdata->phy_speed;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
 		PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1208,24 +1208,24 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
 	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
 	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
-	dev_info->speed_capa =  ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_SCATTER	  |
-		DEV_RX_OFFLOAD_KEEP_CRC;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_SCATTER	  |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
 		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1262,13 +1262,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	fc.autoneg = pdata->pause_autoneg;
 
 	if (pdata->rx_pause && pdata->tx_pause)
-		fc.mode = RTE_FC_FULL;
+		fc.mode = RTE_ETH_FC_FULL;
 	else if (pdata->rx_pause)
-		fc.mode = RTE_FC_RX_PAUSE;
+		fc.mode = RTE_ETH_FC_RX_PAUSE;
 	else if (pdata->tx_pause)
-		fc.mode = RTE_FC_TX_PAUSE;
+		fc.mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc.mode = RTE_FC_NONE;
+		fc.mode = RTE_ETH_FC_NONE;
 
 	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
 	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1298,13 +1298,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	AXGMAC_IOWRITE(pdata, reg, reg_val);
 	fc.mode = fc_conf->mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 	} else {
@@ -1386,15 +1386,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	fc.mode = pfc_conf->fc.mode;
 
-	if (fc.mode == RTE_FC_FULL) {
+	if (fc.mode == RTE_ETH_FC_FULL) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_RX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
 		pdata->tx_pause = 0;
 		pdata->rx_pause = 1;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-	} else if (fc.mode == RTE_FC_TX_PAUSE) {
+	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
 		pdata->tx_pause = 1;
 		pdata->rx_pause = 0;
 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1830,8 +1830,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+	case RTE_ETH_VLAN_TYPE_INNER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
 		if (qinq) {
 			if (tpid != 0x8100 && tpid != 0x88a8)
 				PMD_DRV_LOG(ERR,
@@ -1848,8 +1848,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    "Inner type not supported in single tag\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
-		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
 		if (qinq) {
 			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
 			/*Enable outer VLAN tag*/
@@ -1866,11 +1866,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 					    "tag supported 0x8100/0x88A8\n");
 		}
 		break;
-	case ETH_VLAN_TYPE_MAX:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+	case RTE_ETH_VLAN_TYPE_MAX:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
 		break;
-	case ETH_VLAN_TYPE_UNKNOWN:
-		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+	case RTE_ETH_VLAN_TYPE_UNKNOWN:
+		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
 		break;
 	}
 	return 0;
@@ -1904,8 +1904,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_stripping(pdata);
@@ -1915,8 +1915,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_stripping(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
 				    pdata->eth_dev->device->name);
 			pdata->hw_if.enable_rx_vlan_filtering(pdata);
@@ -1926,14 +1926,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			pdata->hw_if.disable_rx_vlan_filtering(pdata);
 		}
 	}
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
 			axgbe_vlan_extend_enable(pdata);
 			/* Set global registers with default ethertype*/
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					    RTE_ETHER_TYPE_VLAN);
-			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					    RTE_ETHER_TYPE_VLAN);
 		} else {
 			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
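
For reference, a minimal application-side sketch using the same
``RTE_ETH_RETA_GROUP_SIZE`` index/shift arithmetic as the driver code above;
it assumes ``reta_size`` is at most 512:

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: spread reta_size entries round-robin across nb_queues. */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	if (reta_size > 512 || nb_queues == 0)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}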
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index a6226729fe4d..0a3e1c59df1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -97,12 +97,12 @@
 
 /* Receive Side Scaling */
 #define AXGBE_RSS_OFFLOAD  ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define AXGBE_RSS_HASH_KEY_SIZE		40
 #define AXGBE_RSS_MAX_TABLE_SIZE	256
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
index 4f98e695ae74..59fa9175aded 100644
--- a/drivers/net/axgbe/axgbe_mdio.c
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
 		pdata->an_int = 0;
 		axgbe_an73_clear_interrupts(pdata);
 		pdata->eth_dev->data->dev_link.link_status =
-			ETH_LINK_DOWN;
+			RTE_ETH_LINK_DOWN;
 	} else if (pdata->an_state == AXGBE_AN_ERROR) {
 		PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
 			    cur_state);
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index c8618d2d6daa..aa2c27ebaa49 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		(DMA_CH_INC * rxq->queue_id));
 	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
 						  DMA_CH_RDTR_LO);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 567ea2382864..78fc717ec44a 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
 	link.link_speed = sc->link_vars.line_speed;
 	switch (sc->link_vars.duplex) {
 		case DUPLEX_FULL:
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			break;
 		case DUPLEX_HALF:
-			link.link_duplex = ETH_LINK_HALF_DUPLEX;
+			link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 			break;
 	}
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+		 RTE_ETH_LINK_SPEED_FIXED);
 	link.link_status = sc->link_vars.link_up;
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -408,7 +408,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
 		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down. "
 				"VF device is no longer operational");
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	}
 
 	return ret;
@@ -534,7 +534,7 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
 
 	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
 	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -669,7 +669,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 	bnx2x_load_firmware(sc);
 	assert(sc->firmware);
 
-	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		sc->udp_rss = 1;
 
 	sc->rx_budget = BNX2X_RX_BUDGET;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 6743cf92b0e6..39bd739c7bc9 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -569,37 +569,37 @@ struct bnxt_rep_info {
 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
 
 #define BNXT_ETH_RSS_SUPPORT (	\
-	ETH_RSS_IPV4 |		\
-	ETH_RSS_NONFRAG_IPV4_TCP |	\
-	ETH_RSS_NONFRAG_IPV4_UDP |	\
-	ETH_RSS_IPV6 |		\
-	ETH_RSS_NONFRAG_IPV6_TCP |	\
-	ETH_RSS_NONFRAG_IPV6_UDP |	\
-	ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_CKSUM | \
-				     DEV_TX_OFFLOAD_UDP_CKSUM | \
-				     DEV_TX_OFFLOAD_TCP_TSO | \
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
-				     DEV_TX_OFFLOAD_QINQ_INSERT | \
-				     DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
-				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_TCP_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
-				     DEV_RX_OFFLOAD_KEEP_CRC | \
-				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-				     DEV_RX_OFFLOAD_TCP_LRO | \
-				     DEV_RX_OFFLOAD_SCATTER | \
-				     DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RSS_IPV4 |		\
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
+	RTE_ETH_RSS_IPV6 |		\
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
+	RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+				     RTE_ETH_RX_OFFLOAD_SCATTER | \
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index f385723a9f65..2791a5c62db1 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 		goto err_out;
 
 	/* Alloc RSS context only if RSS mode is enabled */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
 
 		/* RSS table size in Thor is 512.
@@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	 * setting is not available at this time, it will not be
 	 * configured correctly in the CFA.
 	 */
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 
 	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
-				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
 				    true : false);
 	if (rc)
 		goto err_out;
@@ -923,35 +923,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
 		link_speed = bp->link_info->support_pam4_speeds;
 
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
-		speed_capa |= ETH_LINK_SPEED_2_5G;
+		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
-		speed_capa |= ETH_LINK_SPEED_20G;
+		speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	if (bp->link_info->auto_mode ==
 	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -995,14 +995,14 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
 				    dev_info->tx_queue_offload_capa;
 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
@@ -1049,8 +1049,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	 */
 
 	/* VMDq resources */
-	vpool = 64; /* ETH_64_POOLS */
-	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	vpool = 64; /* RTE_ETH_64_POOLS */
+	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
 	for (i = 0; i < 4; vpool >>= 1, i++) {
 		if (max_vnics > vpool) {
 			for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1145,15 +1145,15 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
 		goto resource_error;
 
-	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
 		goto resource_error;
 
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
 	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
@@ -1182,7 +1182,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
 			eth_dev->data->port_id,
 			(uint32_t)link->link_speed,
-			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			("full-duplex") : ("half-duplex\n"));
 	else
 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1199,10 +1199,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return 1;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		return 1;
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1247,15 +1247,15 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 	 * a limited subset have been enabled.
 	 */
 	if (eth_dev->data->dev_conf.rxmode.offloads &
-		~(DEV_RX_OFFLOAD_VLAN_STRIP |
-		  DEV_RX_OFFLOAD_KEEP_CRC |
-		  DEV_RX_OFFLOAD_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_TCP_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_RSS_HASH |
-		  DEV_RX_OFFLOAD_VLAN_FILTER))
+		~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		  RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		  RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
 		goto use_scalar_rx;
 
 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1307,7 +1307,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 	 * or tx offloads.
 	 */
 	if (eth_dev->data->scattered_rx ||
-	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+	    (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
 	    BNXT_TRUFLOW_EN(bp))
 		goto use_scalar_tx;
 
@@ -1608,10 +1608,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
 	bnxt_link_update_op(eth_dev, 1);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		vlan_mask |= ETH_VLAN_FILTER_MASK;
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		vlan_mask |= ETH_VLAN_STRIP_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
 	if (rc)
 		goto error;
@@ -1833,8 +1833,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		/* Retrieve link info from hardware */
 		rc = bnxt_get_hwrm_link_config(bp, &new);
 		if (rc) {
-			new.link_speed = ETH_LINK_SPEED_100M;
-			new.link_duplex = ETH_LINK_FULL_DUPLEX;
+			new.link_speed = RTE_ETH_LINK_SPEED_100M;
+			new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR,
 				"Failed to retrieve link rc = 0x%x!\n", rc);
 			goto out;
@@ -2028,7 +2028,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	if (!vnic->rss_table)
 		return -EINVAL;
 
-	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return -EINVAL;
 
 	if (reta_size != tbl_size) {
@@ -2041,8 +2041,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	for (i = 0; i < reta_size; i++) {
 		struct bnxt_rx_queue *rxq;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (!(reta_conf[idx].mask & (1ULL << sft)))
 			continue;
@@ -2095,8 +2095,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 	}
 
 	for (idx = 0, i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
@@ -2134,7 +2134,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	 * If RSS enablement were different than dev_configure,
 	 * then return -EINVAL
 	 */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
@@ -2152,7 +2152,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
 	vnic->hash_mode =
 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
-					    ETH_RSS_LEVEL(rss_conf->rss_hf));
+					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
 
 	/*
 	 * If hashkey is not specified, use the previously configured
@@ -2197,30 +2197,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 		hash_types = vnic->hash_type;
 		rss_conf->rss_hf = 0;
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_IPV4;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_IPV6;
+			rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
 		}
 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
-			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+			rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 			hash_types &=
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 		}
@@ -2260,17 +2260,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 		fc_conf->autoneg = 1;
 	switch (bp->link_info->pause) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	}
 	return 0;
@@ -2293,11 +2293,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		bp->link_info->auto_pause = 0;
 		bp->link_info->force_pause = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@@ -2308,7 +2308,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
 		}
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@@ -2319,7 +2319,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
 		}
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		if (fc_conf->autoneg) {
 			bp->link_info->auto_pause =
 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@@ -2350,7 +2350,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2364,7 +2364,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		tunnel_type =
 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
 				udp_tunnel->udp_port);
@@ -2413,7 +2413,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (!bp->vxlan_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2430,7 +2430,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
 		port = bp->vxlan_fw_dst_port_id;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (!bp->geneve_port_cnt) {
 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
@@ -2608,7 +2608,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 	int rc;
 
 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
-	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		/* Remove any VLAN filters programmed */
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
@@ -2628,7 +2628,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
 		bnxt_add_vlan_filter(bp, 0);
 	}
 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
 
 	return 0;
 }
@@ -2641,7 +2641,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
 
 	/* Destroy vnic filters and vnic */
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
 			bnxt_del_vlan_filter(bp, i);
 	}
@@ -2680,7 +2680,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER) {
+	    RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		rc = bnxt_add_vlan_filter(bp, 0);
 		if (rc)
 			return rc;
@@ -2698,7 +2698,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
-		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+		    !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
 
 	return rc;
 }
@@ -2718,22 +2718,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 	if (!dev->data->dev_started)
 		return 0;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering */
 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
 		if (rc)
 			return rc;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
 		else
 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@@ -2748,10 +2748,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 {
 	struct bnxt *bp = dev->data->dev_private;
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
-	if (vlan_type != ETH_VLAN_TYPE_INNER &&
-	    vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	    vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -2763,7 +2763,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		return -EINVAL;
 	}
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		switch (tpid) {
 		case RTE_ETHER_TYPE_QINQ:
 			bp->outer_tpid_bd =
@@ -2791,7 +2791,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
 		}
 		bp->outer_tpid_bd |= tpid;
 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer vlan in QinQ\n");
 		return -EINVAL;
@@ -2831,7 +2831,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
 	bnxt_del_dflt_mac_filter(bp, vnic);
 
 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		/* This filter will allow only untagged packets */
 		rc = bnxt_add_vlan_filter(bp, 0);
 	} else {
@@ -6556,4 +6556,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
 RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
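[For reviewers: a minimal sketch of the consumer side of this rename. Every
macro below appears in the hunks above or earlier in the patch; the struct
itself and its values are invented for illustration, not taken from any
application.]

    #include <rte_ethdev.h>

    static const struct rte_eth_conf example_conf = {
    	.rxmode = {
    		.mq_mode = RTE_ETH_MQ_RX_RSS,           /* was ETH_MQ_RX_RSS */
    		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP | /* was DEV_RX_OFFLOAD_* */
    			    RTE_ETH_RX_OFFLOAD_VLAN_FILTER,
    	},
    	.rx_adv_conf = {
    		.rss_conf = {
    			.rss_hf = RTE_ETH_RSS_IPV4 |    /* was ETH_RSS_* */
    				  RTE_ETH_RSS_NONFRAG_IPV4_TCP,
    		},
    	},
    };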
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index b2ebb5634e3a..ced697a73980 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -978,7 +978,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		vnic->vlan_strip = true;
 	else
 		vnic->vlan_strip = false;
@@ -1177,7 +1177,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
 	}
 
 	/* If RSS types is 0, use a best effort configuration */
-	types = rss->types ? rss->types : ETH_RSS_IPV4;
+	types = rss->types ? rss->types : RTE_ETH_RSS_IPV4;
 
 	hash_type = bnxt_rte_to_hwrm_hash_types(types);
 
@@ -1322,7 +1322,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 
 		rxq = bp->rx_queues[act_q->index];
 
-		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+		if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
 			goto use_vnic;
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 181e607d7bf8..82e89b7c8af7 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	uint16_t j = dst_id - 1;
 
 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
-	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+	if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
 	    conf->pool_map[j].pools & (1UL << j)) {
 		PMD_DRV_LOG(DEBUG,
 			"Add vlan %u to vmdq pool %u\n",
@@ -2979,12 +2979,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 {
 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
-	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+	if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
 
 	switch (conf_link_speed) {
-	case ETH_LINK_SPEED_10M_HD:
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
 	}
@@ -3001,51 +3001,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 {
 	uint16_t eth_link_speed = 0;
 
-	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
-		return ETH_LINK_SPEED_AUTONEG;
+	if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+		return RTE_ETH_LINK_SPEED_AUTONEG;
 
-	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_100M:
-	case ETH_LINK_SPEED_100M_HD:
+	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		/* FALLTHROUGH */
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
 		break;
-	case ETH_LINK_SPEED_2_5G:
+	case RTE_ETH_LINK_SPEED_2_5G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
 		break;
-	case ETH_LINK_SPEED_20G:
+	case RTE_ETH_LINK_SPEED_20G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		eth_link_speed = pam4_link ?
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 		break;
@@ -3058,11 +3058,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
 	return eth_link_speed;
 }
 
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
-		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
-		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
-		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+		RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+		RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+		RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+		RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
 
 static int bnxt_validate_link_speed(struct bnxt *bp)
 {
@@ -3071,13 +3071,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
 	uint32_t link_speed_capa;
 	uint32_t one_speed;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG)
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
 		return 0;
 
 	link_speed_capa = bnxt_get_speed_capabilities(bp);
 
-	if (link_speed & ETH_LINK_SPEED_FIXED) {
-		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+	if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+		one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
 
 		if (one_speed & (one_speed - 1)) {
 			PMD_DRV_LOG(ERR,
@@ -3107,71 +3107,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 {
 	uint16_t ret = 0;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+	if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
 		if (bp->link_info->support_speeds)
 			return bp->link_info->support_speeds;
 		link_speed = BNXT_SUPPORTED_SPEEDS;
 	}
 
-	if (link_speed & ETH_LINK_SPEED_100M)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_100M_HD)
+	if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
-	if (link_speed & ETH_LINK_SPEED_1G)
+	if (link_speed & RTE_ETH_LINK_SPEED_1G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
-	if (link_speed & ETH_LINK_SPEED_2_5G)
+	if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
-	if (link_speed & ETH_LINK_SPEED_10G)
+	if (link_speed & RTE_ETH_LINK_SPEED_10G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
-	if (link_speed & ETH_LINK_SPEED_20G)
+	if (link_speed & RTE_ETH_LINK_SPEED_20G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
-	if (link_speed & ETH_LINK_SPEED_25G)
+	if (link_speed & RTE_ETH_LINK_SPEED_25G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
-	if (link_speed & ETH_LINK_SPEED_40G)
+	if (link_speed & RTE_ETH_LINK_SPEED_40G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
-	if (link_speed & ETH_LINK_SPEED_50G)
+	if (link_speed & RTE_ETH_LINK_SPEED_50G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
-	if (link_speed & ETH_LINK_SPEED_100G)
+	if (link_speed & RTE_ETH_LINK_SPEED_100G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
-	if (link_speed & ETH_LINK_SPEED_200G)
+	if (link_speed & RTE_ETH_LINK_SPEED_200G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
 	return ret;
 }
 
 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 {
-	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+	uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	switch (hw_link_speed) {
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
-		eth_link_speed = ETH_SPEED_NUM_100M;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
-		eth_link_speed = ETH_SPEED_NUM_1G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
-		eth_link_speed = ETH_SPEED_NUM_2_5G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
-		eth_link_speed = ETH_SPEED_NUM_10G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
-		eth_link_speed = ETH_SPEED_NUM_20G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
-		eth_link_speed = ETH_SPEED_NUM_25G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
-		eth_link_speed = ETH_SPEED_NUM_40G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
-		eth_link_speed = ETH_SPEED_NUM_50G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
-		eth_link_speed = ETH_SPEED_NUM_100G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
-		eth_link_speed = ETH_SPEED_NUM_200G;
+		eth_link_speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
 	default:
@@ -3184,16 +3184,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 
 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 {
-	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+	uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (hw_link_duplex) {
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
 		/* FALLTHROUGH */
-		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
-		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@@ -3222,12 +3222,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 		link->link_speed =
 			bnxt_parse_hw_link_speed(link_info->link_speed);
 	else
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
 	link->link_status = link_info->link_up;
 	link->link_autoneg = link_info->auto_mode ==
 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
-		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+		RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
 exit:
 	return rc;
 }
@@ -3253,7 +3253,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	if (BNXT_CHIP_P5(bp) &&
-	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+	    dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
 		/* 40G is not supported as part of media auto detect.
 		 * The speed should be forced and autoneg disabled
 		 * to configure 40G speed.
@@ -3344,7 +3344,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
 	HWRM_CHECK_RESULT();
 
-	bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+	bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
 
 	svif_info = rte_le_to_cpu_16(resp->svif_info);
 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index b7e88e013a84..1c07db3ca9c5 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -537,7 +537,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
 	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 08cefa1baaef..7940d489a102 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -187,7 +187,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 			rx_ring_info->rx_ring_struct->ring_size *
 			AGG_RING_SIZE_FACTOR)) : 0;
 
-		if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+		if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 			int tpa_max = BNXT_TPA_MAX_AGGS(bp);
 
 			tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@@ -283,7 +283,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 					    ag_bitmap_start, ag_bitmap_len);
 
 			/* TPA info */
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 				rx_ring_info->tpa_info =
 					((struct bnxt_tpa_info *)
 					 ((char *)mz->addr + tpa_info_start));
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 38ec4aa14b77..1456f8b54ffa 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -52,13 +52,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 	bp->nr_vnics = 0;
 
 	/* Multi-queue mode */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
 
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* FALLTHROUGH */
 			/* ETH_8/64_POOLs */
 			pools = conf->nb_queue_pools;
@@ -66,14 +66,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 			max_pools = RTE_MIN(bp->max_vnics,
 					    RTE_MIN(bp->max_l2_ctx,
 					    RTE_MIN(bp->max_rsscos_ctx,
-						    ETH_64_POOLS)));
+						    RTE_ETH_64_POOLS)));
 			PMD_DRV_LOG(DEBUG,
 				    "pools = %u max_pools = %u\n",
 				    pools, max_pools);
 			if (pools > max_pools)
 				pools = max_pools;
 			break;
-		case ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_RSS:
 			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
 			break;
 		default:
@@ -111,7 +111,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 				    ring_idx, rxq, i, vnic);
 		}
 		if (i == 0) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
 				bp->eth_dev->data->promiscuous = 1;
 				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 			}
@@ -121,8 +121,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 		vnic->end_grp_id = end_grp_id;
 
 		if (i) {
-			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
-			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
 				vnic->rss_dflt_cr = true;
 			goto skip_filter_allocation;
 		}
@@ -147,14 +147,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
 	bp->rx_num_qs_per_vnic = nb_q_per_grp;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
 
 		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
 			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
 
 		for (i = 0; i < bp->nr_vnics; i++) {
-			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
 
 			vnic = &bp->vnic_info[i];
 			vnic->hash_type =
@@ -363,7 +363,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
 	rxq->port_id = eth_dev->data->port_id;
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -478,7 +478,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
 
 		if (BNXT_HAS_RING_GRPS(bp)) {
@@ -549,7 +549,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq->rx_started = false;
 	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (BNXT_HAS_RING_GRPS(bp))
 			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index aeacc60a0127..eb555c4545e6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
 	offloads = dev_conf->rxmode.offloads;
 
-	outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
 
 	/* Initialize ol_flags table. */
 	pt = rxr->ol_flags_table;
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
index d08854ff61e2..e4905b4fd169 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_avx2.c
@@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 9b9489a695a2..0627fd212d0a 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static inline void
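[The comment above states the contract for the fast-free path. As a usage
note, a minimal sketch of how an application opts in; the helper name and
queue counts are placeholders, only the offload flag comes from the patch.]

    #include <rte_ethdev.h>

    static int enable_fast_free(uint16_t port_id)
    {
    	struct rte_eth_conf conf = { 0 };

    	/* Only valid when all mbufs sent on the port come from a
    	 * single mempool and carry a reference count of 1. */
    	conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
    	return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }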
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 13211060cf0e..f15e2d3b4ed4 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index 6e563053260a..ffd560166cac 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 9e45ddd7a82e..f2fcaf53021c 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -353,7 +353,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 }
 
 /*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
  * is enabled.
  */
 static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@@ -479,7 +479,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
 	} while (nb_tx_pkts < ring_mask);
 
 	if (nb_tx_pkts) {
-		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp(txq, nb_tx_pkts);
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 26253a7e17f2..c63cf4b943fa 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 {
 	uint16_t hwrm_type = 0;
 
-	if (rte_type & ETH_RSS_IPV4)
+	if (rte_type & RTE_ETH_RSS_IPV4)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-	if (rte_type & ETH_RSS_IPV6)
+	if (rte_type & RTE_ETH_RSS_IPV6)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 
 	return hwrm_type;
@@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
 int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
 {
 	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
-	bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
-	bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP));
+	bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+	bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP));
 	bool l3_only = l3 && !l4;
 	bool l3_and_l4 = l3 && l4;
 
@@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
 	 * return default hash mode.
 	 */
 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
-		return ETH_RSS_LEVEL_PMD_DEFAULT;
+		return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
 	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
 	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
 		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
-		rss_level |= ETH_RSS_LEVEL_INNERMOST;
+		rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
 	else
-		rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+		rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
 
 	return rss_level;
 }
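[For the hash-level translation exercised above (bnxt_rte_to_hwrm_hash_level()
and bnxt_hwrm_to_rte_rss_level()), the level rides in the same rss_hf word as
the hash types; a sketch, with the particular type selection illustrative.]

    uint64_t rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP;

    /* Hash on the innermost headers of tunnelled traffic; the driver
     * recovers the level bits with RTE_ETH_RSS_LEVEL(rss_hf), as in
     * bnxt_rss_hash_update_op() earlier in this patch. */
    rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;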
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index f71543810970..77ecbef04c3d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 	if (vf >= bp->pdev->max_vfs)
 		return -EINVAL;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
 		PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
 		return -ENOTSUP;
 	}
 
 	/* Is this really the correct mapping?  VFd seems to think it is. */
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		flag |= BNXT_VNIC_INFO_PROMISC;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		flag |= BNXT_VNIC_INFO_BCAST;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
 
 	if (on)
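[A usage sketch for the VF rx-mode flags handled above; the port and VF ids
are placeholders, and the flag-to-BNXT_VNIC_INFO_* mapping is the one shown
in the hunk.]

    uint16_t rx_mask = RTE_ETH_VMDQ_ACCEPT_BROADCAST |
    		       RTE_ETH_VMDQ_ACCEPT_MULTICAST;

    /* Accept broadcast and (all-)multicast on VF 0 of port 0. */
    int ret = rte_pmd_bnxt_set_vf_rxmode(0, 0, rx_mask, 1);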
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index fc179a2732ac..8b104b639184 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -167,8 +167,8 @@ struct bond_dev_private {
 	struct rte_eth_desc_lim tx_desc_lim;	/**< Tx descriptor limits */
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[52];				/**< 52-byte hash key buffer. */
 	uint8_t rss_key_len;				/**< hash key length in bytes. */
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2029955c1092..ca50583d62d8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
 	uint16_t key_speed;
 
 	switch (speed) {
-	case ETH_SPEED_NUM_NONE:
+	case RTE_ETH_SPEED_NUM_NONE:
 		key_speed = 0x00;
 		break;
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		key_speed = BOND_LINK_SPEED_KEY_10M;
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		key_speed = BOND_LINK_SPEED_KEY_100M;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		key_speed = BOND_LINK_SPEED_KEY_1000M;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		key_speed = BOND_LINK_SPEED_KEY_10G;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		key_speed = BOND_LINK_SPEED_KEY_20G;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		key_speed = BOND_LINK_SPEED_KEY_40G;
 		break;
 	default:
@@ -887,7 +887,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
 		if (ret >= 0 && link_info.link_status != 0) {
 			key = link_speed_key(link_info.link_speed) << 1;
-			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+			if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
 				key |= BOND_LINK_FULL_DUPLEX_KEY;
 		} else {
 			key = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 5140ef14c2ee..84943cffe2bb 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
 
 	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
 	if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
 		return 0;
 
 	internals = bonded_eth_dev->data->dev_private;
@@ -592,7 +592,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 			return -1;
 		}
 
-		 if (link_props.link_status == ETH_LINK_UP) {
+		if (link_props.link_status == RTE_ETH_LINK_UP) {
 			if (internals->active_slave_count == 0 &&
 			    !internals->user_defined_primary_port)
 				bond_ethdev_primary_set(internals,
@@ -727,7 +727,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
 		internals->tx_offload_capa = 0;
 		internals->rx_queue_offload_capa = 0;
 		internals->tx_queue_offload_capa = 0;
-		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 		internals->reta_size = 0;
 		internals->candidate_max_rx_pktlen = 0;
 		internals->max_rx_pktlen = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 8d038ba6b6c4..834a5937b3aa 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1369,8 +1369,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
 		 * In any other mode the link properties are set to default
 		 * values of AUTONEG/DUPLEX
 		 */
-		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
-		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	}
 }
 
@@ -1700,7 +1700,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	/* If RSS is enabled for bonding, try to enable it for slaves  */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		/* rss_key won't be empty if RSS is configured in bonded dev */
 		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
 					internals->rss_key_len;
@@ -1714,12 +1714,12 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	slave_eth_dev->data->dev_conf.rxmode.mtu =
 			bonded_eth_dev->data->dev_conf.rxmode.mtu;
@@ -1823,7 +1823,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* If RSS is enabled for bonding, synchronize RETA */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int i;
 		struct bond_dev_private *internals;
 
@@ -1946,7 +1946,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 1;
 
 	internals = eth_dev->data->dev_private;
@@ -2086,7 +2086,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 
 	internals->link_status_polling_enabled = 0;
@@ -2416,15 +2416,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 
 	bond_ctx = ethdev->data->dev_private;
 
-	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	if (ethdev->data->dev_started == 0 ||
 			bond_ctx->active_slave_count == 0) {
-		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 
-	ethdev->data->dev_link.link_status = ETH_LINK_UP;
+	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	if (wait_to_complete)
 		link_update = rte_eth_link_get;
@@ -2449,7 +2449,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 					  &slave_link);
 			if (ret < 0) {
 				ethdev->data->dev_link.link_speed =
-					ETH_SPEED_NUM_NONE;
+					RTE_ETH_SPEED_NUM_NONE;
 				RTE_BOND_LOG(ERR,
 					"Slave (port %u) link get failed: %s",
 					bond_ctx->active_slaves[idx],
@@ -2491,7 +2491,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 		 * In these modes the maximum theoretical link speed is the sum
 		 * of all the slaves
 		 */
-		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		one_link_update_succeeded = false;
 
 		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2865,7 +2865,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 			goto link_update;
 
 		/* check link state properties if bonded link is up*/
-		if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+		if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 			if (link_properties_valid(bonded_eth_dev, &link) != 0)
 				RTE_BOND_LOG(ERR, "Invalid link properties "
 					     "for slave %d in bonding mode %d",
@@ -2881,7 +2881,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
 			bonded_eth_dev->data->dev_link.link_status =
-								ETH_LINK_UP;
+								RTE_ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 			lsc_flag = 1;
 
@@ -2973,12 +2973,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
-			RTE_RETA_GROUP_SIZE;
+	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < reta_count; i++) {
 		internals->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -3011,8 +3011,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
 
@@ -3274,7 +3274,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	internals->max_rx_pktlen = 0;
 
 	/* Initially allow to choose any offload type */
-	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 
 	memset(&internals->default_rxconf, 0,
 	       sizeof(internals->default_rxconf));
@@ -3501,7 +3501,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	 * set key to the value specified in port RSS configuration.
 	 * Fall back to default RSS key if the key is not specified
 	 */
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		struct rte_eth_rss_conf *rss_conf =
 			&dev->data->dev_conf.rx_adv_conf.rss_conf;
 		if (rss_conf->rss_key != NULL) {
@@ -3526,9 +3526,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
-			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 				internals->reta_conf[i].reta[j] =
-						(i * RTE_RETA_GROUP_SIZE + j) %
+						(i * RTE_ETH_RETA_GROUP_SIZE + j) %
 						dev->data->nb_rx_queues;
 		}
 	}
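[The idx/sft arithmetic repeated in the RETA hunks above, and in the bnxt ones
earlier, is the standard way to address a single redirection-table entry; a
self-contained sketch, with the entry index and target queue illustrative.]

    struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
    					      RTE_ETH_RETA_GROUP_SIZE] = { 0 };
    uint16_t i = 37;                            /* entry to program */
    uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE; /* which 64-entry group */
    uint16_t sft = i % RTE_ETH_RETA_GROUP_SIZE; /* bit within the group */

    reta_conf[idx].mask |= 1ULL << sft;         /* mark the entry valid */
    reta_conf[idx].reta[sft] = 0;               /* steer it to queue 0 */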
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 25da5f6691d0..f7eb0f437b77 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
diff --git a/drivers/net/cnxk/cn10k_rte_flow.c b/drivers/net/cnxk/cn10k_rte_flow.c
index 8c87452934eb..dff4c7746cf5 100644
--- a/drivers/net/cnxk/cn10k_rte_flow.c
+++ b/drivers/net/cnxk/cn10k_rte_flow.c
@@ -98,7 +98,7 @@ cn10k_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index d6af54b56de6..5d603514c045 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -77,12 +77,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
index eb962ef08cab..5e6c5ee11188 100644
--- a/drivers/net/cnxk/cn10k_tx.c
+++ b/drivers/net/cnxk/cn10k_tx.c
@@ -78,11 +78,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 08c86f9e6b7b..17f8f6debbc8 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-	    (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+	    (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
 	return flags;
@@ -298,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 
 	/* Platform specific checks */
 	if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	     (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		plt_err("Outer IP and SCTP checksum unsupported");
 		return -EINVAL;
 	}
@@ -553,17 +553,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * TSO not supported for earlier chip revisions
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
-		dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+					  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 
 	/* 50G and 100G to be supported for board version C0
 	 * and above of CN9K.
 	 */
 	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
-		dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+		dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
 	}
 
 	dev->hwcap = 0;
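[For the speed bits masked out above, the matching application-side request
uses the same macros; a sketch of forcing a single speed, which is the case
bnxt_validate_link_speed() earlier in this patch verifies with its
one_speed & (one_speed - 1) power-of-two test.]

    struct rte_eth_conf conf = { 0 };

    /* Fixed 10G with autoneg off: RTE_ETH_LINK_SPEED_FIXED plus
     * exactly one speed bit. */
    conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;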
diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index 5c4387e74e0b..8d504c4a6d92 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -77,12 +77,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
-		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
 	}
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
 	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 }
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index e5691a2a7e16..f3f19fed9780 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -77,11 +77,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 
 	if (dev->scalar_ena) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 	} else {
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
-		if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2e05d8bf1552..db54468dbca1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 
 	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	uint32_t speed_capa;
 
 	/* Auto negotiation disabled */
-	speed_capa = ETH_LINK_SPEED_FIXED;
+	speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
-		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			      RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			      RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return speed_capa;
@@ -65,7 +65,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 	struct roc_nix *nix = &dev->nix;
 	int i, rc = 0;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup Inline Inbound */
 		rc = roc_nix_inl_inb_init(nix);
 		if (rc) {
@@ -80,8 +80,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 		cnxk_nix_inb_mode_set(dev, true);
 	}
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		struct plt_bitmap *bmap;
 		size_t bmap_sz;
 		void *mem;
@@ -100,8 +100,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 
 		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
 
-		/* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
-		if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+		/* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
 			goto done;
 
 		rc = -ENOMEM;
@@ -136,7 +136,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 done:
 	return 0;
 cleanup:
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		rc |= roc_nix_inl_inb_fini(nix);
 	return rc;
 }
@@ -182,7 +182,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	int rc, ret = 0;
 
 	/* Cleanup Inline inbound */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy inbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -199,8 +199,8 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	}
 
 	/* Cleanup Inline outbound */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+	    dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Destroy outbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
@@ -242,8 +242,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 }
 
@@ -273,7 +273,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct rte_eth_fc_conf fc_conf = {0};
 	int rc;
 
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -281,10 +281,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
@@ -305,11 +305,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (roc_model_is_cn96_ax() &&
 	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
-	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_cfg.mode =
-				(fc_cfg.mode == RTE_FC_FULL ||
-				fc_cfg.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_cfg.mode == RTE_ETH_FC_FULL ||
+				fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -352,7 +352,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -380,7 +380,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* When Tx Security offload is enabled, increase tx desc count by
 	 * max possible outbound desc count.
 	 */
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		nb_desc += dev->outb.nb_desc;
 
 	/* Setup ROC SQ */
@@ -499,7 +499,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * to avoid meta packet drop as LBK does not currently support
 	 * backpressure.
 	 */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
 		uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
 
 		/* Use current RQ's aura limit if inl rq is not available */
@@ -561,7 +561,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq_sp->qconf.nb_desc = nb_desc;
 	rxq_sp->qconf.mp = mp;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Setup rq reference for inline dev if present */
 		rc = roc_nix_inl_dev_rq_get(rq);
 		if (rc)
@@ -579,7 +579,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
 		rc = cnxk_nix_tsc_convert(dev);
 		if (rc) {
 			plt_err("Failed to calculate delta and freq mult");
@@ -618,7 +618,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_nix_dbg("Releasing rxq %u", qid);
 
 	/* Release rq reference for inline dev if present */
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		roc_nix_inl_dev_rq_put(rq);
 
 	/* Cleanup ROC RQ */
@@ -657,24 +657,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->ethdev_rss_hf = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -683,34 +683,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -746,7 +746,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
 	uint64_t rss_hf;
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 
@@ -958,8 +958,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
 
 	/* Nothing much to do if offload is not enabled */
 	if (!(dev->tx_offloads &
-	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
 		return 0;
 
 	/* Setup LSO formats in AF. Its a no-op if other ethdev has
@@ -1007,13 +1007,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
@@ -1054,7 +1054,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	/* Prepare rx cfg */
 	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
 	if (dev->rx_offloads &
-	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
 		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
 	}
@@ -1062,7 +1062,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
 		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
 		/* Disable drop re if rx offload security is enabled and
 		 * platform does not support it.
@@ -1454,12 +1454,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled on PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
 		cnxk_eth_dev_ops.timesync_enable(eth_dev);
 	else
 		cnxk_eth_dev_ops.timesync_disable(eth_dev);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		rc = rte_mbuf_dyn_rx_timestamp_register
 			(&dev->tstamp.tstamp_dynfield_offset,
 			 &dev->tstamp.rx_tstamp_dynflag);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 72f80ae948cf..29a3540ed3f8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -58,41 +58,44 @@
 	 CNXK_NIX_TX_NB_SEG_MAX)
 
 #define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
-	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
-	 ETH_RSS_L4_DST_ONLY)
+	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |                   \
+	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 #define CNXK_NIX_RSS_OFFLOAD                                                   \
-	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
-	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
-	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |                 \
+	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL |             \
+	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST |                 \
+	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
 
 #define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
-	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
-	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
-	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
-	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
-	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
-	 DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
+	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |       \
+	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT |          \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |              \
+	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |               \
+	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |     \
+	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS |           \
+	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
-	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
-	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
-	 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH |            \
-	 DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP |                \
-	 DEV_RX_OFFLOAD_SECURITY)
+	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |         \
+	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER |    \
+	 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH |    \
+	 RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP |        \
+	 RTE_ETH_RX_OFFLOAD_SECURITY)
 
 #define RSS_IPV4_ENABLE                                                        \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE                                                        \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
-	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |                            \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |         \
+	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE                                                     \
-	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS 3
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index c0b949e21ab0..e068f553495c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -104,11 +104,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
 		val = ROC_NIX_RSS_RETA_SZ_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
 		val = ROC_NIX_RSS_RETA_SZ_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
 		val = ROC_NIX_RSS_RETA_SZ_256;
 	else
 		val = ROC_NIX_RSS_RETA_SZ_64;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index d0924df76152..67464302653d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -81,24 +81,24 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-		{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
-		{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
-		{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
-		{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
-		{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
-		{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
-		{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
-		{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-		{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-		{DEV_RX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
-		{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
-		{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+		{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
 						 "Scalar, Rx Offloads:"
@@ -142,28 +142,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-		{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-		{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-		{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
-		{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
-		{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-		{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
-		{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
-		{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
-		{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
-		{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
-		{DEV_TX_OFFLOAD_SECURITY, " Security,"},
-		{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
-		{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-		{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
 	};
 	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
 						 "Scalar, Tx Offloads:"
@@ -203,8 +203,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	enum rte_eth_fc_mode mode_map[] = {
-					   RTE_FC_NONE, RTE_FC_RX_PAUSE,
-					   RTE_FC_TX_PAUSE, RTE_FC_FULL
+					   RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+					   RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
 					  };
 	struct roc_nix *nix = &dev->nix;
 	int mode;
@@ -264,10 +264,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -408,13 +408,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		plt_err("Scatter offload is not enabled for mtu");
 		goto exit;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
 	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
 		plt_err("Greater than maximum supported packet length");
 		goto exit;
@@ -734,8 +734,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -770,8 +770,8 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
 		goto fail;
 
 	/* Copy RETA table */
-	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = reta[idx];
 			idx++;
@@ -804,7 +804,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 	if (rss_conf->rss_key)
 		roc_nix_rss_key_set(nix, rss_conf->rss_key);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c
index 6a7080167598..f10a502826c6 100644
--- a/drivers/net/cnxk/cnxk_link.c
+++ b/drivers/net/cnxk/cnxk_link.c
@@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		plt_info("Port %d: Link Up - speed %u Mbps - %s",
 			 (int)(eth_dev->data->port_id),
 			 (uint32_t)link->link_speed,
-			 link->link_duplex == ETH_LINK_FULL_DUPLEX
+			 link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 				 ? "full-duplex"
 				 : "half-duplex");
 	else
@@ -89,7 +89,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
 
 	eth_link.link_status = link->status;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	/* Print link info */
@@ -117,17 +117,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 		return 0;
 
 	if (roc_nix_is_lbk(&dev->nix)) {
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_100G;
-		link.link_autoneg = ETH_LINK_FIXED;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	} else {
 		rc = roc_nix_mac_link_info_get(&dev->nix, &info);
 		if (rc)
 			return rc;
 		link.link_status = info.status;
 		link.link_speed = info.speed;
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 		if (info.full_duplex)
 			link.link_duplex = info.full_duplex;
 	}
diff --git a/drivers/net/cnxk/cnxk_ptp.c b/drivers/net/cnxk/cnxk_ptp.c
index 449489f599c4..139fea256ccd 100644
--- a/drivers/net/cnxk/cnxk_ptp.c
+++ b/drivers/net/cnxk/cnxk_ptp.c
@@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 	dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	rc = roc_nix_ptp_rx_ena_dis(nix, true);
 	if (!rc) {
@@ -257,7 +257,7 @@ int
 cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+	uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	struct roc_nix *nix = &dev->nix;
 	int rc = 0;
 
diff --git a/drivers/net/cnxk/cnxk_rte_flow.c b/drivers/net/cnxk/cnxk_rte_flow.c
index dfc33ba8654a..b08d7c34faa9 100644
--- a/drivers/net/cnxk/cnxk_rte_flow.c
+++ b/drivers/net/cnxk/cnxk_rte_flow.c
@@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		plt_err("multi-queue mode is disabled");
 		return -ENOTSUP;
 	}
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 37625c5bfb69..dbcbfaf68a30 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -28,31 +28,31 @@
 #define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
 
 #define CXGBE_DEFAULT_RSS_KEY_LEN     40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-				ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
-				ETH_RSS_NONFRAG_IPV6_OTHER | \
-				ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
-				    ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
-				    ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+				RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+				    RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+				    RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
 
 /* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
-			   DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_TX_OFFLOAD_UDP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_CKSUM | \
-			   DEV_TX_OFFLOAD_TCP_TSO | \
-			   DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			   DEV_RX_OFFLOAD_IPV4_CKSUM | \
-			   DEV_RX_OFFLOAD_UDP_CKSUM | \
-			   DEV_RX_OFFLOAD_TCP_CKSUM | \
-			   DEV_RX_OFFLOAD_SCATTER | \
-			   DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+			   RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+			   RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+			   RTE_ETH_RX_OFFLOAD_SCATTER | \
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 /* Devargs filtermode and filtermask representation */
 enum cxgbe_devargs_filter_mode_flags {
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index f77b2976002c..4758321778d1 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 	}
 
 	new_link.link_status = cxgbe_force_linkup(adapter) ?
-			       ETH_LINK_UP : pi->link_cfg.link_ok;
+			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
 	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
@@ -374,7 +374,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
 			goto out;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 	else
 		eth_dev->data->scattered_rx = 0;
@@ -438,9 +438,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
@@ -1080,13 +1080,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		rx_pause = 1;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1099,12 +1099,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	u8 tx_pause = 0, rx_pause = 0;
 	int ret;
 
-	if (fc_conf->mode == RTE_FC_FULL) {
+	if (fc_conf->mode == RTE_ETH_FC_FULL) {
 		tx_pause = 1;
 		rx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
 		tx_pause = 1;
-	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+	} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
 		rx_pause = 1;
 	}
 
@@ -1200,9 +1200,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	}
 
 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@@ -1246,8 +1246,8 @@ static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1277,8 +1277,8 @@ static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (!(reta_conf[idx].mask & (1ULL << shift)))
 			continue;
 
@@ -1479,7 +1479,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_100G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
 		}
@@ -1488,7 +1488,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_50G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
 		}
@@ -1497,7 +1497,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
 
 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
 		if (capa_arr) {
-			capa_arr[num].speed = ETH_SPEED_NUM_25G;
+			capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 91d6bb9bbcb0..f1ac32270961 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1670,7 +1670,7 @@ int cxgbe_link_start(struct port_info *pi)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
-			    !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+			    !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
 			    true);
 	if (ret == 0) {
 		ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@@ -1694,7 +1694,7 @@ int cxgbe_link_start(struct port_info *pi)
 	}
 
 	if (ret == 0 && cxgbe_force_linkup(adapter))
-		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return ret;
 }
 
@@ -1725,10 +1725,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
 	if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
 			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
@@ -1865,7 +1865,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
 {
 #define SET_SPEED(__speed_name) \
 	do { \
-		*speed_caps |= ETH_LINK_ ## __speed_name; \
+		*speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
 	} while (0)
 
 #define FW_CAPS_TO_SPEED(__fw_name) \
@@ -1952,7 +1952,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
 			      speed_caps);
 
 	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
-		*speed_caps |= ETH_LINK_SPEED_FIXED;
+		*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
 }
 
 /**
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index c79cdb8d8ad7..89ea7dd47c0b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -54,29 +54,29 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		DPAA_PMD_DEBUG("enabling scatter mode");
 		fman_if_set_sg(dev->process_private, 1);
 		dev->data->scattered_rx = 1;
@@ -283,43 +283,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
 	/* Configure link only if link is UP*/
 	if (link->link_status) {
-		if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 			/* Start autoneg only if link is not in autoneg mode */
 			if (!link->link_autoneg)
 				dpaa_restart_link_autoneg(__fif->node_name);
-		} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-			switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
-			case ETH_LINK_SPEED_10M_HD:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+			switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+			case RTE_ETH_LINK_SPEED_10M_HD:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10M:
-				speed = ETH_SPEED_NUM_10M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10M:
+				speed = RTE_ETH_SPEED_NUM_10M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M_HD:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_HALF_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M_HD:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_HALF_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_100M:
-				speed = ETH_SPEED_NUM_100M;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_100M:
+				speed = RTE_ETH_SPEED_NUM_100M;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_1G:
-				speed = ETH_SPEED_NUM_1G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_1G:
+				speed = RTE_ETH_SPEED_NUM_1G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_2_5G:
-				speed = ETH_SPEED_NUM_2_5G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_2_5G:
+				speed = RTE_ETH_SPEED_NUM_2_5G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
-			case ETH_LINK_SPEED_10G:
-				speed = ETH_SPEED_NUM_10G;
-				duplex = ETH_LINK_FULL_DUPLEX;
+			case RTE_ETH_LINK_SPEED_10G:
+				speed = RTE_ETH_SPEED_NUM_10G;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			default:
-				speed = ETH_SPEED_NUM_NONE;
-				duplex = ETH_LINK_FULL_DUPLEX;
+				speed = RTE_ETH_SPEED_NUM_NONE;
+				duplex = RTE_ETH_LINK_FULL_DUPLEX;
 				break;
 			}
 			/* Set link speed */
@@ -535,30 +535,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
 	if (fif->mac_type == fman_mac_1g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G;
 	} else if (fif->mac_type == fman_mac_2_5g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G;
 	} else if (fif->mac_type == fman_mac_10g) {
-		dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-					| ETH_LINK_SPEED_10M
-					| ETH_LINK_SPEED_100M_HD
-					| ETH_LINK_SPEED_100M
-					| ETH_LINK_SPEED_1G
-					| ETH_LINK_SPEED_2_5G
-					| ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+					| RTE_ETH_LINK_SPEED_10M
+					| RTE_ETH_LINK_SPEED_100M_HD
+					| RTE_ETH_LINK_SPEED_100M
+					| RTE_ETH_LINK_SPEED_1G
+					| RTE_ETH_LINK_SPEED_2_5G
+					| RTE_ETH_LINK_SPEED_10G;
 	} else {
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, fif->mac_type);
@@ -591,12 +591,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+			{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
 	};
 
 	/* Update Rx offload info */
@@ -623,14 +623,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -664,7 +664,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 			ret = dpaa_get_link_status(__fif->node_name, link);
 			if (ret)
 				return ret;
-			if (link->link_status == ETH_LINK_DOWN &&
+			if (link->link_status == RTE_ETH_LINK_DOWN &&
 			    wait_to_complete)
 				rte_delay_ms(CHECK_INTERVAL);
 			else
@@ -675,15 +675,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	}
 
 	if (ioctl_version < 2) {
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
-		link->link_autoneg = ETH_LINK_AUTONEG;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 		if (fif->mac_type == fman_mac_1g)
-			link->link_speed = ETH_SPEED_NUM_1G;
+			link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		else if (fif->mac_type == fman_mac_2_5g)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else if (fif->mac_type == fman_mac_10g)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
 			DPAA_PMD_ERR("invalid link_speed: %s, %d",
 				     dpaa_intf->name, fif->mac_type);
@@ -962,7 +962,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (max_rx_pktlen <= buffsz) {
 		;
 	} else if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SCATTER) {
+			RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
 			DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
 				"MaxSGlist %d",
@@ -1268,7 +1268,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
 	else
 		return dpaa_eth_dev_stop(dev);
 	return 0;
@@ -1284,7 +1284,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 	__fif = container_of(fif, struct __fman_if, __if);
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+		dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
 	else
 		dpaa_eth_dev_start(dev);
 	return 0;
@@ -1314,10 +1314,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == RTE_FC_NONE) {
+	if (fc_conf->mode == RTE_ETH_FC_NONE) {
 		return 0;
-	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-		 fc_conf->mode == RTE_FC_FULL) {
+	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+		 fc_conf->mode == RTE_ETH_FC_FULL) {
 		fman_if_set_fc_threshold(dev->process_private,
 					 fc_conf->high_water,
 					 fc_conf->low_water,
@@ -1361,11 +1361,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 	}
 	ret = fman_if_get_fc_threshold(dev->process_private);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time =
 			fman_if_get_fc_quanta(dev->process_private);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -1626,10 +1626,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
 	fc_conf = dpaa_intf->fc_conf;
 	ret = fman_if_get_fc_threshold(fman_intf);
 	if (ret) {
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
 	} else {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b5728e09c29f..c868e9d5bd9b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -74,11 +74,11 @@
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
 #define DPAA_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP)
 
 #define DPAA_TX_CKSUM_OFFLOAD_MASK (             \
 		PKT_TX_IP_CKSUM |                \
diff --git a/drivers/net/dpaa/dpaa_flow.c b/drivers/net/dpaa/dpaa_flow.c
index c5b5ec869519..1ccd03602790 100644
--- a/drivers/net/dpaa/dpaa_flow.c
+++ b/drivers/net/dpaa/dpaa_flow.c
@@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1U << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_L2_PAYLOAD:
 
 				if (l2_configured)
 					break;
@@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_ETH;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
 
 				if (ipv4_configured)
 					break;
@@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV4;
 				break;
 
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (ipv6_configured)
 					break;
@@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_IPV6;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
 
 				if (tcp_configured)
 					break;
@@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_TCP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (udp_configured)
 					break;
@@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
 					= HEADER_TYPE_UDP;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 08f49af7685d..3170694841df 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -220,9 +220,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 		if (req_dist_set % 2 != 0) {
 			dist_field = 1ULL << loop;
 			switch (dist_field) {
-			case ETH_RSS_L2_PAYLOAD:
-			case ETH_RSS_ETH:
-
+			case RTE_ETH_RSS_L2_PAYLOAD:
+			case RTE_ETH_RSS_ETH:
 				if (l2_configured)
 					break;
 				l2_configured = 1;
@@ -238,7 +237,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_PPPOE:
+			case RTE_ETH_RSS_PPPOE:
 				if (pppoe_configured)
 					break;
 				kg_cfg->extracts[i].extract.from_hdr.prot =
@@ -252,7 +251,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_ESP:
+			case RTE_ETH_RSS_ESP:
 				if (esp_configured)
 					break;
 				esp_configured = 1;
@@ -268,7 +267,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_AH:
+			case RTE_ETH_RSS_AH:
 				if (ah_configured)
 					break;
 				ah_configured = 1;
@@ -284,8 +283,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_C_VLAN:
-			case ETH_RSS_S_VLAN:
+			case RTE_ETH_RSS_C_VLAN:
+			case RTE_ETH_RSS_S_VLAN:
 				if (vlan_configured)
 					break;
 				vlan_configured = 1;
@@ -301,7 +300,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_MPLS:
+			case RTE_ETH_RSS_MPLS:
 
 				if (mpls_configured)
 					break;
@@ -338,13 +337,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_IPV4:
-			case ETH_RSS_FRAG_IPV4:
-			case ETH_RSS_NONFRAG_IPV4_OTHER:
-			case ETH_RSS_IPV6:
-			case ETH_RSS_FRAG_IPV6:
-			case ETH_RSS_NONFRAG_IPV6_OTHER:
-			case ETH_RSS_IPV6_EX:
+			case RTE_ETH_RSS_IPV4:
+			case RTE_ETH_RSS_FRAG_IPV4:
+			case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+			case RTE_ETH_RSS_IPV6:
+			case RTE_ETH_RSS_FRAG_IPV6:
+			case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+			case RTE_ETH_RSS_IPV6_EX:
 
 				if (l3_configured)
 					break;
@@ -382,12 +381,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 			break;
 
-			case ETH_RSS_NONFRAG_IPV4_TCP:
-			case ETH_RSS_NONFRAG_IPV6_TCP:
-			case ETH_RSS_NONFRAG_IPV4_UDP:
-			case ETH_RSS_NONFRAG_IPV6_UDP:
-			case ETH_RSS_IPV6_TCP_EX:
-			case ETH_RSS_IPV6_UDP_EX:
+			case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+			case RTE_ETH_RSS_IPV6_TCP_EX:
+			case RTE_ETH_RSS_IPV6_UDP_EX:
 
 				if (l4_configured)
 					break;
@@ -414,8 +413,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
 				i++;
 				break;
 
-			case ETH_RSS_NONFRAG_IPV4_SCTP:
-			case ETH_RSS_NONFRAG_IPV6_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+			case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
 
 				if (sctp_configured)
 					break;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a0270e78520e..59e728577f53 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -38,33 +38,33 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_CHECKSUM |
-		DEV_RX_OFFLOAD_SCTP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_TIMESTAMP;
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-		DEV_RX_OFFLOAD_RSS_HASH |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_MT_LOCKFREE |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -142,7 +142,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN Filter not avaialble */
 		if (!priv->max_vlan_filters) {
 			DPAA2_PMD_INFO("VLAN filter not available");
@@ -150,7 +150,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 
 		if (dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
 		else
@@ -251,13 +251,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 					dev_rx_offloads_nodis;
 	dev_info->tx_offload_capa = dev_tx_offloads_sup |
 					dev_tx_offloads_nodis;
-	dev_info->speed_capa = ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
-	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -270,10 +270,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
 	if (dpaa2_svr_family == SVR_LX2160A) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-				ETH_LINK_SPEED_40G |
-				ETH_LINK_SPEED_50G |
-				ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+				RTE_ETH_LINK_SPEED_40G |
+				RTE_ETH_LINK_SPEED_50G |
+				RTE_ETH_LINK_SPEED_100G;
 	}
 
 	return 0;
@@ -291,15 +291,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} rx_offload_map[] = {
-			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+			{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+			{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+			{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+			{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+			{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+			{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
 	};
 
 	/* Update Rx offload info */
@@ -326,15 +326,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 		uint64_t flags;
 		const char *output;
 	} tx_offload_map[] = {
-			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+			{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+			{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+			{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+			{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+			{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+			{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+			{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+			{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
 	};
 
 	/* Update Tx offload info */
@@ -573,7 +573,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		return -1;
 	}
 
-	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
 			ret = dpaa2_setup_flow_dist(dev,
 					eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -587,12 +587,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rx_l3_csum_offload = true;
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
 		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -610,7 +610,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
 	{
 		ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -623,12 +623,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		dpaa2_enable_ts[dev->data->port_id] = true;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		tx_l3_csum_offload = true;
 
-	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
 		tx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -660,8 +660,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
 	dpaa2_tm_init(dev);
 
@@ -1856,7 +1856,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 			DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
 			return -1;
 		}
-		if (state.up == ETH_LINK_DOWN &&
+		if (state.up == RTE_ETH_LINK_DOWN &&
 		    wait_to_complete)
 			rte_delay_ms(CHECK_INTERVAL);
 		else
@@ -1868,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
 	link.link_speed = state.rate;
 
 	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	ret = rte_eth_linkstatus_set(dev, &link);
 	if (ret == -1)
@@ -2031,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	No TX side flow control (send Pause frame disabled)
 		 */
 		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_RX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	} else {
 		/* DPNI_LINK_OPT_PAUSE not set
 		 *  if ASYM_PAUSE set,
@@ -2043,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 *	Flow control disabled
 		 */
 		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return ret;
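
The pause-option decoding above maps directly onto the renamed flow control enum. A minimal application-side sketch, assuming a hypothetical helper, of reading the mode back through the public API:

#include <rte_ethdev.h>

static int
rx_pause_enabled(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
		return 0;

	/* Rx pause is honoured in both FULL and RX_PAUSE modes. */
	return fc_conf.mode == RTE_ETH_FC_FULL ||
	       fc_conf.mode == RTE_ETH_FC_RX_PAUSE;
}
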
@@ -2089,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	/* update cfg with fc_conf */
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		/* Full flow control;
 		 * OPT_PAUSE set, ASYM_PAUSE not set
 		 */
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		/* Enable RX flow control
 		 * OPT_PAUSE not set;
 		 * ASYM_PAUSE set;
@@ -2104,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		/* Enable TX Flow control
 		 * OPT_PAUSE set
 		 * ASYM_PAUSE set
@@ -2112,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		cfg.options |= DPNI_LINK_OPT_PAUSE;
 		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		/* Disable Flow control
 		 * OPT_PAUSE not set
 		 * ASYM_PAUSE not set
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index fdc62ec30d22..c5e9267bf04d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -65,17 +65,17 @@
 #define DPAA2_TX_CONF_ENABLE	0x08
 
 #define DPAA2_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IP | \
-	ETH_RSS_UDP | \
-	ETH_RSS_TCP | \
-	ETH_RSS_SCTP | \
-	ETH_RSS_MPLS | \
-	ETH_RSS_C_VLAN | \
-	ETH_RSS_S_VLAN | \
-	ETH_RSS_ESP | \
-	ETH_RSS_AH | \
-	ETH_RSS_PPPOE)
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IP | \
+	RTE_ETH_RSS_UDP | \
+	RTE_ETH_RSS_TCP | \
+	RTE_ETH_RSS_SCTP | \
+	RTE_ETH_RSS_MPLS | \
+	RTE_ETH_RSS_C_VLAN | \
+	RTE_ETH_RSS_S_VLAN | \
+	RTE_ETH_RSS_ESP | \
+	RTE_ETH_RSS_AH | \
+	RTE_ETH_RSS_PPPOE)
 
 /* LX2 FRC Parsed values (Little Endian) */
 #define DPAA2_PKT_TYPE_ETHER		0x0060
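
Hash-field masks like the one above are consumed through rss_conf.rss_hf. A sketch (illustrative, not dpaa2 specific) of requesting RSS with the renamed flags:

#include <rte_ethdev.h>

static const struct rte_eth_conf rss_port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			/* Hash on IP addresses plus TCP/UDP ports. */
			.rss_hf = RTE_ETH_RSS_IP |
				  RTE_ETH_RSS_TCP |
				  RTE_ETH_RSS_UDP,
		},
	},
};
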
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index f40369e2c3f9..7c77243b5d1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -773,7 +773,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP)
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
@@ -987,7 +987,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 							eth_data->port_id);
 
 		if (eth_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_VLAN_STRIP) {
+				RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			rte_vlan_strip(bufs[num_rx]);
 		}
 
@@ -1230,7 +1230,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					if (unlikely(((*bufs)->ol_flags
 						& PKT_TX_VLAN_PKT) ||
 						(eth_data->dev_conf.txmode.offloads
-						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -1273,7 +1273,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
 				(eth_data->dev_conf.txmode.offloads
-				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
 				int ret = rte_vlan_insert(bufs);
 				if (ret)
 					goto send_n_return;
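
The Tx path above inserts a tag either when the packet asks for it or when the queue-level offload is on. A minimal sketch (hypothetical helper) of the per-packet request; the queue must have RTE_ETH_TX_OFFLOAD_VLAN_INSERT enabled for the PMD to honour it:

#include <rte_mbuf.h>

static void
request_vlan_insert(struct rte_mbuf *m, uint16_t vlan_id)
{
	m->ol_flags |= PKT_TX_VLAN_PKT;
	m->vlan_tci = vlan_id;
}
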
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 7d5d6377859a..a548ae2ccb2c 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -82,15 +82,15 @@
 #define E1000_FTQF_QUEUE_ENABLE          0x00000100
 
 #define IGB_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 /*
  * The overhead from MTU to max frame size.
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 73152dec6ed1..9da477e59def 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -597,8 +597,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	e1000_clear_hw_cntrs_base_generic(hw);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_em_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -611,39 +611,39 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
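
The per-speed advertisement logic above is driven by link_speeds in the device configuration. A sketch (illustrative only) of forcing a fixed speed with the renamed flags; OR-ing RTE_ETH_LINK_SPEED_FIXED disables autonegotiation:

#include <rte_ethdev.h>

static const struct rte_eth_conf fixed_speed_conf = {
	.link_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_FIXED,
};
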
@@ -1102,9 +1102,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
@@ -1162,17 +1162,17 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
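
On the application side the same renamed status and duplex values come back through the link API. A minimal sketch, assuming a hypothetical reporting helper:

#include <stdio.h>
#include <rte_ethdev.h>

static void
report_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u up, %u Mbps, %s\n", port_id, link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
	else
		printf("port %u down\n", port_id);
}
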
@@ -1424,15 +1424,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1601,7 +1601,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
-			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
@@ -1683,13 +1683,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 344149c19147..648b04154c5b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -93,7 +93,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -173,7 +173,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1171,11 +1171,11 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	RTE_SET_USED(dev);
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	return tx_offload_capa;
 }
@@ -1369,13 +1369,13 @@ em_get_rx_port_offloads_capa(void)
 	uint64_t rx_offload_capa;
 
 	rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP  |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM   |
-		DEV_RX_OFFLOAD_KEEP_CRC    |
-		DEV_RX_OFFLOAD_SCATTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return rx_offload_capa;
 }
@@ -1469,7 +1469,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1788,7 +1788,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -1831,7 +1831,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1844,7 +1844,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1870,7 +1870,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
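
When KEEP_CRC is enabled as above, received frames retain the 4-byte FCS, so applications must account for it when interpreting pkt_len. A small sketch (hypothetical helper) of that adjustment:

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint32_t
frame_len_without_fcs(const struct rte_mbuf *m, uint64_t rx_offloads)
{
	/* With RTE_ETH_RX_OFFLOAD_KEEP_CRC the FCS is part of the mbuf. */
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		return m->pkt_len - RTE_ETHER_CRC_LEN;
	return m->pkt_len;
}
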
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index dbe811a1ad2f..ae3bc4a9c201 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1073,21 +1073,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
-	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
-	    tx_mq_mode == ETH_MQ_TX_DCB ||
-	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+	    tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* Check multi-queue mode.
-		 * To no break software we accept ETH_MQ_RX_NONE as this might
+		 * To not break software we accept RTE_ETH_MQ_RX_NONE as this might
 		 * be used to turn off VLAN filter.
 		 */
 
-		if (rx_mq_mode == ETH_MQ_RX_NONE ||
-		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+		if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+		    rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 		} else {
 			/* Only support one queue on VFs.
@@ -1099,12 +1099,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 		/* TX mode is not used here, so mode might be ignored.*/
-		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(WARNING, "SRIOV is active,"
 					" TX mode %d is not supported. "
 					" Driver will behave as %d mode.",
-					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+					tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
 		}
 
 		/* check valid queue number */
@@ -1117,17 +1117,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
 		/* To not break software that sets an invalid mode, only display
 		 * a warning if an invalid mode is used.
 		 */
-		if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
-		    rx_mq_mode != ETH_MQ_RX_RSS) {
+		if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+		    rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 			/* RSS together with VMDq not supported*/
 			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				     rx_mq_mode);
 			return -EINVAL;
 		}
 
-		if (tx_mq_mode != ETH_MQ_TX_NONE &&
-		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+		if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+		    tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
 			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
 					" Due to txmode is meaningless in this"
 					" driver, just ignore.",
@@ -1146,8 +1146,8 @@ eth_igb_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = igb_check_mq_mode(dev);
@@ -1287,8 +1287,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/*
 	 * VLAN Offload Settings
 	 */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_igb_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
@@ -1296,7 +1296,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable VLAN filter since VMDq always uses VLAN filter */
 		igb_vmdq_vlan_hw_filter_enable(dev);
 	}
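
Multi-queue mode selection itself is untouched by the rename; only the identifiers gain the prefix. A sketch (illustrative only) of the usual RSS-on-Rx, no-multi-queue-on-Tx configuration under the new names:

#include <rte_ethdev.h>

static const struct rte_eth_conf mq_conf = {
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
	.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
};
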
@@ -1310,39 +1310,39 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -2185,21 +2185,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_82576:
 		dev_info->max_rx_queues = 16;
 		dev_info->max_tx_queues = 16;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 16;
 		break;
 
 	case e1000_82580:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
 	case e1000_i350:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
-		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
 		dev_info->vmdq_queue_num = 8;
 		break;
 
@@ -2225,7 +2225,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		return -EINVAL;
 	}
 	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2251,9 +2251,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2296,12 +2296,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	switch (hw->mac.type) {
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
@@ -2402,17 +2402,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 	} else if (!link_check) {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2588,7 +2588,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
 	/* only outer TPID of double VLAN can be configured*/
-	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg = E1000_READ_REG(hw, E1000_VET);
 		reg = (reg & (~E1000_VET_VET_EXT)) |
 			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
@@ -2703,22 +2703,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -2870,7 +2870,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
 				     " Port %d: Link Up - speed %u Mbps - %s",
 				     dev->data->port_id,
 				     (unsigned)link.link_speed,
-				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				     "full-duplex" : "half-duplex");
 		} else {
 			PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3024,13 +3024,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -3099,18 +3099,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		 * on configuration
 		 */
 		switch (fc_conf->mode) {
-		case RTE_FC_NONE:
+		case RTE_ETH_FC_NONE:
 			ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_RX_PAUSE:
+		case RTE_ETH_FC_RX_PAUSE:
 			ctrl |= E1000_CTRL_RFCE;
 			ctrl &= ~E1000_CTRL_TFCE;
 			break;
-		case RTE_FC_TX_PAUSE:
+		case RTE_ETH_FC_TX_PAUSE:
 			ctrl |= E1000_CTRL_TFCE;
 			ctrl &= ~E1000_CTRL_RFCE;
 			break;
-		case RTE_FC_FULL:
+		case RTE_ETH_FC_FULL:
 			ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
 			break;
 		default:
@@ -3258,22 +3258,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -3571,16 +3571,16 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
@@ -3612,16 +3612,16 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 	uint16_t idx, shift;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IGB_4_BIT_MASK);
 		if (!mask)
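
The idx/shift arithmetic above mirrors what an application does when it fills the table: each rte_eth_rss_reta_entry64 covers RTE_ETH_RETA_GROUP_SIZE (64) entries. A minimal sketch, assuming a hypothetical helper, of spreading a 128-entry table over nb_queues queues:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
fill_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[2]; /* 128 / 64 */
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= UINT64_C(1) << shift;
		reta[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta,
					   RTE_ETH_RSS_RETA_SIZE_128);
}
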
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 2ce74dd5a9a5..fe355ef6b3b5 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -88,7 +88,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
-	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+	RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a1d5eecc14a1..bcce2fc726d8 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -111,7 +111,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -186,7 +186,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -1459,13 +1459,13 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	RTE_SET_USED(dev);
-	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
-			  DEV_TX_OFFLOAD_TCP_TSO     |
-			  DEV_TX_OFFLOAD_MULTI_SEGS;
+	tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return tx_offload_capa;
 }
@@ -1640,19 +1640,19 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
-			  DEV_RX_OFFLOAD_VLAN_FILTER |
-			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_RX_OFFLOAD_UDP_CKSUM   |
-			  DEV_RX_OFFLOAD_TCP_CKSUM   |
-			  DEV_RX_OFFLOAD_KEEP_CRC    |
-			  DEV_RX_OFFLOAD_SCATTER     |
-			  DEV_RX_OFFLOAD_RSS_HASH;
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+			  RTE_ETH_RX_OFFLOAD_SCATTER     |
+			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == e1000_i350 ||
 	    hw->mac.type == e1000_i210 ||
 	    hw->mac.type == e1000_i211)
-		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	return rx_offload_capa;
 }
@@ -1733,7 +1733,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1950,23 +1950,23 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
@@ -2032,23 +2032,23 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -2170,15 +2170,15 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
 			E1000_VMOLR_MPME);
 
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 			vmolr |= E1000_VMOLR_AUPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 			vmolr |= E1000_VMOLR_ROMPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 			vmolr |= E1000_VMOLR_ROPE;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 			vmolr |= E1000_VMOLR_BAM;
-		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+		if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 			vmolr |= E1000_VMOLR_MPME;
 
 		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
@@ -2214,9 +2214,9 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	/* VLVF: set up filters for vlan tags as configured */
 	for (i = 0; i < cfg->nb_pool_maps; i++) {
 		/* set vlan id in VF register and set the valid bit */
-		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
-                        (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
-			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+			(cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
+			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
 			E1000_VLVF_POOLSEL_MASK)));
 	}
 
@@ -2268,7 +2268,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t mrqc;
 
-	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+	if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
 		/*
 		 * SRIOV active scheme
 		 * FIXME if support RSS together with VMDq & SRIOV
@@ -2282,14 +2282,14 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-			case ETH_MQ_RX_RSS:
+			case RTE_ETH_MQ_RX_RSS:
 				igb_rss_configure(dev);
 				break;
-			case ETH_MQ_RX_VMDQ_ONLY:
+			case RTE_ETH_MQ_RX_VMDQ_ONLY:
 				/*Configure general VMDQ only RX parameters*/
 				igb_vmdq_rx_hw_configure(dev);
 				break;
-			case ETH_MQ_RX_NONE:
+			case RTE_ETH_MQ_RX_NONE:
 				/* if mq_mode is none, disable rss mode.*/
 			default:
 				igb_rss_disable(dev);
@@ -2338,7 +2338,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Set maximum packet length by default, and might be updated
 		 * together with enabling/disabling dual VLAN.
 		 */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			max_len += VLAN_TAG_SIZE;
 
 		E1000_WRITE_REG(hw, E1000_RLPML, max_len);
@@ -2374,7 +2374,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -2444,7 +2444,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2488,16 +2488,16 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
 	if (rxmode->offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		rxcsum |= E1000_RXCSUM_TUOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_CRCOFL;
@@ -2505,7 +2505,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
 		/* clear STRCRC bit in all queues */
@@ -2545,7 +2545,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/* Make sure VLAN Filters are off. */
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
 		rctl &= ~E1000_RCTL_VFE;
 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
@@ -2743,7 +2743,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
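
As the hunk above shows, the PMD switches to its scattered burst function when the renamed SCATTER bit is set. A sketch (illustrative only) of requesting it so frames larger than one mbuf segment can be received:

#include <rte_ethdev.h>

static const struct rte_eth_conf scatter_conf = {
	.rxmode = {
		.offloads = RTE_ETH_RX_OFFLOAD_SCATTER,
	},
};
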
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f3b17d70c9a4..4d2601d15a57 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -117,10 +117,10 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-			DEV_TX_OFFLOAD_UDP_CKSUM |\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
 		       PKT_TX_IP_CKSUM |\
 		       PKT_TX_TCP_SEG)
@@ -332,7 +332,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
 		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
@@ -340,7 +340,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L3 checksum is needed */
 		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -357,12 +357,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 
 		/* check if L4 checksum is needed */
 		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_UDP_CKSUM) &&
-				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
 		} else {
@@ -643,9 +643,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct ena_adapter *adapter = dev->data->dev_private;
 
-	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_NONE;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return 0;
 }
@@ -923,7 +923,7 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
@@ -2004,9 +2004,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Scattered Rx cannot be turned off in the HW, so this capability must
 	 * be forced.
@@ -2067,17 +2067,17 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if (adapter->offloads.rx_offloads &
 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
 		port_offloads |=
-			DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-	port_offloads |= DEV_RX_OFFLOAD_SCATTER;
+	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return port_offloads;
 }
@@ -2087,17 +2087,17 @@ static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
 	uint64_t port_offloads = 0;
 
 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-		port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-		port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 	if (adapter->offloads.tx_offloads &
 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
 		port_offloads |=
-			DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
-	port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return port_offloads;
 }
@@ -2130,14 +2130,14 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
-			ETH_LINK_SPEED_1G   |
-			ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_5G   |
-			ETH_LINK_SPEED_10G  |
-			ETH_LINK_SPEED_25G  |
-			ETH_LINK_SPEED_40G  |
-			ETH_LINK_SPEED_50G  |
-			ETH_LINK_SPEED_100G;
+			RTE_ETH_LINK_SPEED_1G   |
+			RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_5G   |
+			RTE_ETH_LINK_SPEED_10G  |
+			RTE_ETH_LINK_SPEED_25G  |
+			RTE_ETH_LINK_SPEED_40G  |
+			RTE_ETH_LINK_SPEED_50G  |
+			RTE_ETH_LINK_SPEED_100G;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
@@ -2303,7 +2303,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
@@ -2416,11 +2416,11 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		/* Check if requested offload is also enabled for the queue */
 		if ((ol_flags & PKT_TX_IP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
 		    (l4_csum_flag == PKT_TX_TCP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
 		    (l4_csum_flag == PKT_TX_UDP_CKSUM &&
-		     !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
 			PMD_TX_LOG(DEBUG,
 				"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
 				i, m->nb_segs, tx_ring->id);
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 4f4142ed12d0..865e1241e0ce 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -58,8 +58,8 @@
 
 #define ENA_HASH_KEY_SIZE		40
 
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define ENA_IO_TXQ_IDX(q)		(2 * (q))
 #define ENA_IO_RXQ_IDX(q)		(2 * (q) + 1)
diff --git a/drivers/net/ena/ena_rss.c b/drivers/net/ena/ena_rss.c
index 152098410fa2..be4007e3f3fe 100644
--- a/drivers/net/ena/ena_rss.c
+++ b/drivers/net/ena/ena_rss.c
@@ -76,7 +76,7 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -93,8 +93,8 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
 		/* Each reta_conf is for 64 entries.
 		 * To support 128 we use 2 conf of 64.
 		 */
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
@@ -139,7 +139,7 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	if (reta_size == 0 || reta_conf == NULL)
 		return -EINVAL;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR,
 			"RSS was not configured for the PMD\n");
 		return -ENOTSUP;
@@ -154,8 +154,8 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
-		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		reta_conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
 			reta_conf[reta_conf_idx].reta[reta_idx] =
 				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
@@ -199,34 +199,34 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Convert proto to ETH flag */
 	switch (proto) {
 	case ENA_ADMIN_RSS_TCP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP4:
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		break;
 	case ENA_ADMIN_RSS_TCP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 		break;
 	case ENA_ADMIN_RSS_UDP6:
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 		break;
 	case ENA_ADMIN_RSS_IP4:
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 		break;
 	case ENA_ADMIN_RSS_IP6:
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 		break;
 	case ENA_ADMIN_RSS_IP4_FRAG:
-		rss_hf |= ETH_RSS_FRAG_IPV4;
+		rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
 		break;
 	case ENA_ADMIN_RSS_NOT_IP:
-		rss_hf |= ETH_RSS_L2_PAYLOAD;
+		rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
 		break;
 	case ENA_ADMIN_RSS_TCP6_EX:
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 		break;
 	case ENA_ADMIN_RSS_IP6_EX:
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 		break;
 	default:
 		break;
@@ -235,10 +235,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L3. */
 	switch (fields & ENA_HF_RSS_ALL_L3) {
 	case ENA_ADMIN_RSS_L3_SA:
-		rss_hf |= ETH_RSS_L3_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L3_DA:
-		rss_hf |= ETH_RSS_L3_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
 		break;
 	default:
 		break;
@@ -247,10 +247,10 @@ static uint64_t ena_admin_hf_to_eth_hf(enum ena_admin_flow_hash_proto proto,
 	/* Check if only DA or SA is being used for L4. */
 	switch (fields & ENA_HF_RSS_ALL_L4) {
 	case ENA_ADMIN_RSS_L4_SP:
-		rss_hf |= ETH_RSS_L4_SRC_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
 		break;
 	case ENA_ADMIN_RSS_L4_DP:
-		rss_hf |= ETH_RSS_L4_DST_ONLY;
+		rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
 		break;
 	default:
 		break;
@@ -268,11 +268,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
 	/* Determine which fields of L3 should be used. */
-	switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
-	case ETH_RSS_L3_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+	case RTE_ETH_RSS_L3_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_DA;
 		break;
-	case ETH_RSS_L3_SRC_ONLY:
+	case RTE_ETH_RSS_L3_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L3_SA;
 		break;
 	default:
@@ -284,11 +284,11 @@ static uint16_t ena_eth_hf_to_admin_hf(enum ena_admin_flow_hash_proto proto,
 	}
 
 	/* Determine which fields of L4 should be used. */
-	switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
-	case ETH_RSS_L4_DST_ONLY:
+	switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+	case RTE_ETH_RSS_L4_DST_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_DP;
 		break;
-	case ETH_RSS_L4_SRC_ONLY:
+	case RTE_ETH_RSS_L4_SRC_ONLY:
 		fields_mask |= ENA_ADMIN_RSS_L4_SP;
 		break;
 	default:
@@ -334,43 +334,43 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
 	int rc, i;
 
 	/* Turn on appropriate fields for each requested packet type */
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
 		selected_fields[ENA_ADMIN_RSS_UDP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
 
-	if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+	if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
 
-	if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+	if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
 		selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
 
-	if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+	if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
 		selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
 			ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
 
@@ -541,7 +541,7 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint16_t admin_hf;
 	static bool warn_once;
 
-	if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
 		return -ENOTSUP;
 	}
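
The RETA update/query loops above all share the same 64-entry group arithmetic, now spelled RTE_ETH_RETA_GROUP_SIZE. A minimal, PMD-independent sketch of that indexing, assuming only the public ethdev structures (the printf stands in for the driver-specific work):

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Sketch: walk a redirection table of reta_size entries that the caller
 * split into 64-entry groups, using the same conf_idx/shift math as the
 * update/query paths above. Entries are valid only where the group's
 * mask bit is set. */
static void
walk_reta(const struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		uint16_t conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		if (reta_conf[conf_idx].mask & (1ULL << shift))
			printf("entry %u -> queue %u\n",
			       i, reta_conf[conf_idx].reta[shift]);
	}
}
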
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 1b567f01eae0..7cdb8ce463ed 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -100,27 +100,27 @@ enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
 
 	if (status & ENETC_LINK_MODE)
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	else
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 
 	if (status & ENETC_LINK_STATUS)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	switch (status & ENETC_LINK_SPEED_MASK) {
 	case ENETC_LINK_SPEED_1G:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case ENETC_LINK_SPEED_100M:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	default:
 	case ENETC_LINK_SPEED_10M:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -207,10 +207,10 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	dev_info->max_tx_queues = MAX_TX_RINGS;
 	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
 	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		 DEV_RX_OFFLOAD_UDP_CKSUM |
-		 DEV_RX_OFFLOAD_TCP_CKSUM |
-		 DEV_RX_OFFLOAD_KEEP_CRC);
+		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		 RTE_ETH_RX_OFFLOAD_KEEP_CRC);
 
 	return 0;
 }
@@ -463,7 +463,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
 			       RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
-	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				     RTE_ETHER_CRC_LEN : 0);
 
 	return 0;
@@ -705,7 +705,7 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
 	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		int config;
 
 		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
@@ -713,10 +713,10 @@ enetc_dev_configure(struct rte_eth_dev *dev)
 		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		checksum &= ~L3_CKSUM;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		checksum &= ~L4_CKSUM;
 
 	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
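
The enetc Rx-queue setup above derives the stripped-CRC length from the renamed offload flag. A hedged one-liner of the same pattern, assuming nothing beyond the public headers:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

/* Sketch: when the application keeps the FCS, the PMD must account for
 * the 4-byte CRC in each received segment; otherwise the length is 0. */
static uint8_t
crc_len_from_offloads(uint64_t rx_offloads)
{
	return (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
	       RTE_ETHER_CRC_LEN : 0;
}
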
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 47bfdac2cfdd..d5493c98345d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -178,7 +178,7 @@ struct enic {
 	 */
 	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
 	uint8_t rss_enable;
-	uint64_t rss_hf; /* ETH_RSS flags */
+	uint64_t rss_hf; /* RTE_ETH_RSS flags */
 	union vnic_rss_key rss_key;
 	union vnic_rss_cpu rss_cpu;
 
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 8df7332bc5e0..c8bdaf1a8e79 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,30 +38,30 @@ static const struct vic_speed_capa {
 	uint16_t sub_devid;
 	uint32_t capa;
 } vic_speed_capa_map[] = {
-	{ 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-	{ 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-	{ 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-	{ 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-	{ 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-	{ 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-	{ 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-	{ 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-	{ 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-	{ 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-	{ 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-	{ 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-	{ 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-	{ 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-	{ 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-	{ 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-		  ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-	{ 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-	{ 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-	{ 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-	{ 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-	{ 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-	{ 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+	{ 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+	{ 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+	{ 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+	{ 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+	{ 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+	{ 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+	{ 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+	{ 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+	{ 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+	{ 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+	{ 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+	{ 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+	{ 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+	{ 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+	{ 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+	{ 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+		  RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+	{ 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+	{ 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+	{ 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+	{ 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+	{ 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+	{ 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
 	{ 0, 0 }, /* End marker */
 };
 
@@ -297,8 +297,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	ENICPMD_FUNC_TRACE();
 
 	offloads = eth_dev->data->dev_conf.rxmode.offloads;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			enic->ig_vlan_strip_en = 1;
 		else
 			enic->ig_vlan_strip_en = 0;
@@ -323,17 +323,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	enic->mc_count = 0;
 	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-				  DEV_RX_OFFLOAD_CHECKSUM);
+				  RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* All vlan offload masks to apply the current settings */
-	mask = ETH_VLAN_STRIP_MASK |
-		ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+		RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = enicpmd_vlan_offload_set(eth_dev, mask);
 	if (ret) {
 		dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -435,14 +435,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
 	}
 	/* 1300 and later models are at least 40G */
 	if (id >= 0x0100)
-		return ETH_LINK_SPEED_40G;
+		return RTE_ETH_LINK_SPEED_40G;
 	/* VFs have subsystem id 0, check device id */
 	if (id == 0) {
 		/* Newer VF implies at least 40G model */
 		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-			return ETH_LINK_SPEED_40G;
+			return RTE_ETH_LINK_SPEED_40G;
 	}
-	return ETH_LINK_SPEED_10G;
+	return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -774,8 +774,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
 				enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -806,8 +806,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
 	 */
 	rss_cpu = enic->rss_cpu;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			rss_cpu.cpu[i / 4].b[i % 4] =
 				enic_rte_rq_idx_to_sop_idx(
@@ -883,7 +883,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 	 */
 	conf->offloads = enic->rx_offload_capa;
 	if (!enic->ig_vlan_strip_en)
-		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -969,8 +969,8 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
 				   struct rte_eth_udp_tunnel *tnl)
 {
-	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
-	    tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+	if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+	    tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
 		return -ENOTSUP;
 	if (!enic->overlay_offload) {
 		ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
@@ -1010,7 +1010,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
@@ -1039,7 +1039,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
 	ret = udp_tunnel_common_check(enic, tnl);
 	if (ret)
 		return ret;
-	vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+	vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
 	if (vxlan)
 		port = enic->vxlan_port;
 	else
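
enicpmd_dev_configure() ORs all three VLAN masks together so that the offload-set callback re-reads the full current rxmode state in one call. The application-side equivalent with the renamed masks might look like the following sketch (port_id is assumed to be an already-configured port):

#include <rte_ethdev.h>

/* Sketch: re-apply every VLAN offload category on a port after changing
 * dev_conf.rxmode.offloads, mirroring the mask built in the enic
 * configure path above. */
static int
reapply_vlan_offloads(uint16_t port_id)
{
	int mask = RTE_ETH_VLAN_STRIP_MASK |
		   RTE_ETH_VLAN_FILTER_MASK |
		   RTE_ETH_VLAN_EXTEND_MASK;

	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
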
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index dfc7f5d1f94f..21b1fffb14f0 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
 
 	memset(&link, 0, sizeof(link));
 	link.link_status = enic_get_link_status(enic);
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_speed = vnic_dev_port_speed(enic->vdev);
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
 	}
 
 	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* vnic notification of link status has already been turned on in
 	 * enic_dev_init() which is called during probe time.  Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
 	 * and vlan insertion are supported.
 	 */
 	simple_tx_offloads = enic->tx_offload_capa &
-		(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_VLAN_INSERT |
-		 DEV_TX_OFFLOAD_IPV4_CKSUM |
-		 DEV_TX_OFFLOAD_UDP_CKSUM |
-		 DEV_TX_OFFLOAD_TCP_CKSUM);
+		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if ((eth_dev->data->dev_conf.txmode.offloads &
 	     ~simple_tx_offloads) == 0) {
 		ENICPMD_LOG(DEBUG, " use the simple tx handler");
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
 
 	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_SCATTER) {
+	    RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((max pkt len)/mbuf_size) */
 		mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
@@ -1385,15 +1385,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 	rss_hash_type = 0;
 	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
 	if (enic->rq_count > 1 &&
-	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+	    (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
 	    rss_hf != 0) {
 		rss_enable = 1;
-		if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			      ETH_RSS_NONFRAG_IPV4_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
 			if (enic->udp_rss_weak) {
 				/*
@@ -1404,12 +1404,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
 			}
 		}
-		if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
-			      ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+		if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+			      RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-		if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+		if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
 			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
 			if (enic->udp_rss_weak)
 				rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -1745,9 +1745,9 @@ enic_enable_overlay_offload(struct enic *enic)
 		return -EINVAL;
 	}
 	enic->tx_offload_capa |=
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		(enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
-		(enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+		(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
 	enic->tx_offload_mask |=
 		PKT_TX_OUTER_IPV6 |
 		PKT_TX_OUTER_IPV4 |
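
The simple-Tx-handler decision in enic_enable() above is a plain subset test: the fast path is usable only when the configured Tx offloads request nothing outside the handler's supported set. A minimal sketch of that check:

#include <stdint.h>

/* Sketch: return non-zero when every requested offload bit is inside
 * the simple handler's supported set, as the enic code tests above. */
static int
can_use_simple_tx(uint64_t requested, uint64_t simple_tx_offloads)
{
	return (requested & ~simple_tx_offloads) == 0;
}
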
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index c5777772a09e..918a9e170ff6 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -147,31 +147,31 @@ int enic_get_vnic_config(struct enic *enic)
 		 * IPV4 hash type handles both non-frag and frag packet types.
 		 * TCP/UDP is controlled via a separate flag below.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
-			ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+			RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (ENIC_SETTING(enic, RSSHASH_IPV6))
 		/*
 		 * The VIC adapter can perform RSS on IPv6 packets with and
 		 * without extension headers. An IPv6 "fragment" is an IPv6
 		 * packet with the fragment extension header.
 		 */
-		enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
-			ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+			RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
 	if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
-			ETH_RSS_IPV6_TCP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_IPV6_TCP_EX;
 	if (enic->udp_rss_weak)
 		enic->flow_type_rss_offloads |=
-			ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
-		enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
-			ETH_RSS_IPV6_UDP_EX;
+		enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_IPV6_UDP_EX;
 
 	/* Zero offloads if RSS is not enabled */
 	if (!ENIC_SETTING(enic, RSS))
@@ -201,19 +201,19 @@ int enic_get_vnic_config(struct enic *enic)
 	enic->tx_queue_offload_capa = 0;
 	enic->tx_offload_capa =
 		enic->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	enic->rx_offload_capa =
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	enic->tx_offload_mask =
 		PKT_TX_IPV6 |
 		PKT_TX_IPV4 |
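
enic_get_vnic_config() builds flow_type_rss_offloads incrementally: each firmware capability unlocks a group of RTE_ETH_RSS_* bits. A reduced sketch of the idiom, where has_ipv4_hash and has_tcp4_hash are hypothetical flags standing in for the ENIC_SETTING() probes:

#include <stdint.h>
#include <rte_ethdev.h>

/* Sketch: accumulate the advertised RSS offload set from capability
 * flags; each flag contributes its own group of RTE_ETH_RSS_* bits. */
static uint64_t
build_rss_offloads(int has_ipv4_hash, int has_tcp4_hash)
{
	uint64_t offloads = 0;

	if (has_ipv4_hash)
		offloads |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
			    RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
	if (has_tcp4_hash)
		offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;

	return offloads;
}
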
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index b87c036e6014..82d595b1d1a0 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -17,10 +17,10 @@
 
 const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
 static const struct rte_eth_link eth_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_UP,
-	.link_autoneg = ETH_LINK_AUTONEG,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_UP,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG,
 };
 
 static int
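
failsafe advertises a fixed link using the renamed constants. On the consuming side, an application would read them back roughly as follows; a sketch assuming a started port (rte_eth_link_get_nowait() returns 0 on success):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: query link state and report it using the RTE_ETH_LINK_*
 * names introduced by this patch. */
static void
report_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
	else
		printf("port %u down\n", port_id);
}
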
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 602c04033c18..5f4810051dac 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -326,7 +326,7 @@ int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
 	int qid;
 	struct rte_eth_dev *fsdev;
 	struct rxq **rxq;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 				&ETH(sdev)->data->dev_conf.intr_conf;
 
 	fsdev = fs_dev(sdev);
@@ -519,7 +519,7 @@ int
 failsafe_rx_intr_install(struct rte_eth_dev *dev)
 {
 	struct fs_priv *priv = PRIV(dev);
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 			&priv->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 29de39910c6e..a3a8a1c82e3a 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1172,51 +1172,51 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
 	 * configuring a sub-device.
 	 */
 	infos->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->rx_queue_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_MACSEC_STRIP |
-		DEV_RX_OFFLOAD_HEADER_SPLIT |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_TIMESTAMP |
-		DEV_RX_OFFLOAD_SECURITY |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_LRO |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SECURITY |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	infos->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	infos->flow_type_rss_offloads =
-		ETH_RSS_IP |
-		ETH_RSS_UDP |
-		ETH_RSS_TCP;
+		RTE_ETH_RSS_IP |
+		RTE_ETH_RSS_UDP |
+		RTE_ETH_RSS_TCP;
 	infos->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 17c73c4dc5ae..b7522a47a80b 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -177,7 +177,7 @@ struct fm10k_rx_queue {
 	uint8_t drop_en;
 	uint8_t rx_deferred_start; /* don't start this queue in dev start. */
 	uint16_t rx_ftag_en; /* indicates FTAG RX supported */
-	uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /*
@@ -209,7 +209,7 @@ struct fm10k_tx_queue {
 	uint16_t next_rs; /* Next pos to set RS flag */
 	uint16_t next_dd; /* Next pos to check DD flag */
 	volatile uint32_t *tail_ptr;
-	uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
 	uint16_t nb_desc;
 	uint16_t port_id;
 	uint8_t tx_deferred_start; /** don't start this queue in dev start. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 66f4a5c6df2c..d256334bfde9 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -413,12 +413,12 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
 
 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 		return -EINVAL;
 	}
 
-	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		return 0;
 
 	if (hw->mac.type == fm10k_mac_vf) {
@@ -449,8 +449,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
@@ -510,7 +510,7 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};
 
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
 		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
@@ -547,15 +547,15 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 	 */
 	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	if (mrqc == 0) {
 		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%" PRIx64 " is not"
@@ -602,7 +602,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	if (hw->mac.type != fm10k_mac_pf)
 		return;
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		nb_queue_pools = vmdq_conf->nb_queue_pools;
 
 	/* no pool number change, no need to update logic port and VLAN/MAC */
@@ -759,7 +759,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+			rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1145,7 +1145,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Update default vlan when not in VMDQ mode */
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
 	fm10k_link_update(dev, 0);
@@ -1222,11 +1222,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();
 
-	dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_speed  = RTE_ETH_SPEED_NUM_50G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	dev->data->dev_link.link_status =
-		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+		dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return 0;
 }
@@ -1378,7 +1378,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_vfs            = pdev->max_vfs;
 	dev_info->vmdq_pool_base     = 0;
 	dev_info->vmdq_queue_base    = 0;
-	dev_info->max_vmdq_pools     = ETH_32_POOLS;
+	dev_info->max_vmdq_pools     = RTE_ETH_32_POOLS;
 	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
 	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
@@ -1389,15 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
-	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-					ETH_RSS_IPV6 |
-					ETH_RSS_IPV6_EX |
-					ETH_RSS_NONFRAG_IPV4_TCP |
-					ETH_RSS_NONFRAG_IPV6_TCP |
-					ETH_RSS_IPV6_TCP_EX |
-					ETH_RSS_NONFRAG_IPV4_UDP |
-					ETH_RSS_NONFRAG_IPV6_UDP |
-					ETH_RSS_IPV6_UDP_EX;
+	dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+					RTE_ETH_RSS_IPV6 |
+					RTE_ETH_RSS_IPV6_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+					RTE_ETH_RSS_IPV6_TCP_EX |
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+					RTE_ETH_RSS_IPV6_UDP_EX;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1435,9 +1435,9 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+			RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+			RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1509,7 +1509,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		return -EINVAL;
 	}
 
-	if (vlan_id > ETH_VLAN_ID_MAX) {
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
 		return -EINVAL;
 	}
@@ -1767,20 +1767,20 @@ static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+	return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
 }
 
 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
-			   DEV_RX_OFFLOAD_VLAN_FILTER |
-			   DEV_RX_OFFLOAD_IPV4_CKSUM  |
-			   DEV_RX_OFFLOAD_UDP_CKSUM   |
-			   DEV_RX_OFFLOAD_TCP_CKSUM   |
-			   DEV_RX_OFFLOAD_HEADER_SPLIT |
-			   DEV_RX_OFFLOAD_RSS_HASH);
+	return  (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+			   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			   RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+			   RTE_ETH_RX_OFFLOAD_RSS_HASH);
 }
 
 static int
@@ -1965,12 +1965,12 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 
-	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
-			  DEV_TX_OFFLOAD_MULTI_SEGS  |
-			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
-			  DEV_TX_OFFLOAD_UDP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_CKSUM   |
-			  DEV_TX_OFFLOAD_TCP_TSO);
+	return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+			  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO);
 }
 
 static int
@@ -2111,8 +2111,8 @@ fm10k_reta_update(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2160,8 +2160,8 @@ fm10k_reta_query(struct rte_eth_dev *dev,
 	 * 128-entries in 32 registers
 	 */
 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				BIT_MASK_PER_UINT32);
 		if (mask == 0)
@@ -2198,15 +2198,15 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	mrqc = 0;
-	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
-	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
-	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
+	mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 
 	/* If the mapping doesn't fit any supported, return */
 	if (mrqc == 0)
@@ -2243,15 +2243,15 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
 	hf = 0;
-	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
-	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
-	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
-	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV4)     ? RTE_ETH_RSS_IPV4              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6              : 0;
+	hf |= (mrqc & FM10K_MRQC_IPV6)     ? RTE_ETH_RSS_IPV6_EX           : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP  : 0;
+	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX       : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP  : 0;
+	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX       : 0;
 
 	rss_conf->rss_hf = hf;
 
@@ -2606,7 +2606,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 
 			/* first clear the internal SW recording structure */
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					false);
 
@@ -2622,7 +2622,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
 					MAIN_VSI_POOL_NUMBER);
 
 			if (!(dev->data->dev_conf.rxmode.mq_mode &
-						ETH_MQ_RX_VMDQ_FLAG))
+						RTE_ETH_MQ_RX_VMDQ_FLAG))
 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
 					true);
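
fm10k folds the requested RTE_ETH_RSS_* bits into its MRQC register and rejects requests that map to nothing, as seen twice above. A stripped-down sketch of the pattern, with the HW_MRQC_* values as hypothetical stand-ins for the real register bits:

#include <stdint.h>
#include <errno.h>
#include <rte_ethdev.h>

/* Hypothetical hash-control bits; the real values live in the base code. */
#define HW_MRQC_IPV4     0x1
#define HW_MRQC_TCP_IPV4 0x2

/* Sketch: translate the requested hash functions into a hardware word
 * and fail when no supported hash type was requested. */
static int
rss_hf_to_mrqc(uint64_t hf, uint32_t *mrqc)
{
	uint32_t v = 0;

	v |= (hf & RTE_ETH_RSS_IPV4)             ? HW_MRQC_IPV4     : 0;
	v |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? HW_MRQC_TCP_IPV4 : 0;

	if (v == 0)
		return -EINVAL;

	*mrqc = v;
	return 0;
}
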
 
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 83af01dc2da6..50973a662c67 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -208,11 +208,11 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 	/* without rx ol_flags, no VP flag report */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 #endif
 
@@ -221,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c
index cb9cf6efa287..80f9eb5c3031 100644
--- a/drivers/net/hinic/base/hinic_pmd_hwdev.c
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -1320,28 +1320,28 @@ hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
 static int hinic_link_event_process(struct hinic_hwdev *hwdev,
 				    struct rte_eth_dev *eth_dev, u8 status)
 {
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 	struct nic_port_info port_info;
 	struct rte_eth_link link;
 	int rc = HINIC_OK;
 
 	if (!status) {
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
 		memset(&port_info, 0, sizeof(port_info));
 		rc = hinic_get_port_info(hwdev, &port_info);
 		if (rc) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
-			link.link_autoneg = ETH_LINK_FIXED;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+			link.link_autoneg = RTE_ETH_LINK_FIXED;
 		} else {
 			link.link_speed = port_speed[port_info.speed %
 						LINK_SPEED_MAX];
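
hinic translates firmware speed codes through a fixed lookup table of RTE_ETH_SPEED_NUM_* values, using a modulo to stay inside the table. The same idiom in isolation (LINK_SPEED_MAX mirrors the driver macro):

#include <stdint.h>
#include <rte_ethdev.h>

#define LINK_SPEED_MAX 7 /* mirrors the driver's table size */

/* Sketch: map a firmware speed code to an ethdev speed; the modulo
 * clamps out-of-range codes so the table cannot be overrun. */
static uint32_t
fw_speed_to_rte(unsigned int fw_code)
{
	static const uint32_t port_speed[LINK_SPEED_MAX] = {
		RTE_ETH_SPEED_NUM_10M, RTE_ETH_SPEED_NUM_100M,
		RTE_ETH_SPEED_NUM_1G, RTE_ETH_SPEED_NUM_10G,
		RTE_ETH_SPEED_NUM_25G, RTE_ETH_SPEED_NUM_40G,
		RTE_ETH_SPEED_NUM_100G,
	};

	return port_speed[fw_code % LINK_SPEED_MAX];
}
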
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index c2374ebb6759..4cd5a85d5f8d 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -311,8 +311,8 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* mtu size is 256~9600 */
 	if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
@@ -338,7 +338,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
 
 	/* init vlan offload */
 	err = hinic_vlan_offload_set(dev,
-				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+				RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
 		(void)hinic_config_mq_mode(dev, FALSE);
@@ -696,15 +696,15 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
 	} else {
 		*speed_capa = 0;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
-			*speed_capa |= ETH_LINK_SPEED_1G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_1G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
-			*speed_capa |= ETH_LINK_SPEED_10G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_10G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
-			*speed_capa |= ETH_LINK_SPEED_25G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_25G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
-			*speed_capa |= ETH_LINK_SPEED_40G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_40G;
 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
-			*speed_capa |= ETH_LINK_SPEED_100G;
+			*speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	}
 }
 
@@ -732,24 +732,24 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 
 	hinic_get_speed_capa(dev, &info->speed_capa);
 	info->rx_queue_offload_capa = 0;
-	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_VLAN_FILTER |
-				DEV_RX_OFFLOAD_SCATTER |
-				DEV_RX_OFFLOAD_TCP_LRO |
-				DEV_RX_OFFLOAD_RSS_HASH;
+	info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				RTE_ETH_RX_OFFLOAD_SCATTER |
+				RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	info->tx_queue_offload_capa = 0;
-	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_UDP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_CKSUM |
-				DEV_TX_OFFLOAD_SCTP_CKSUM |
-				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_TCP_TSO |
-				DEV_TX_OFFLOAD_MULTI_SEGS;
+	info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
 	info->reta_size = HINIC_RSS_INDIR_SIZE;
@@ -846,20 +846,20 @@ static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
 	u8 port_link_status = 0;
 	struct nic_port_info port_link_info;
 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
-	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
-					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
-					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
-					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+					RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+					RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+					RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
 
 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
 	if (rc)
 		return rc;
 
 	if (!port_link_status) {
-		link->link_status = ETH_LINK_DOWN;
+		link->link_status = RTE_ETH_LINK_DOWN;
 		link->link_speed = 0;
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
-		link->link_autoneg = ETH_LINK_FIXED;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_FIXED;
 		return HINIC_OK;
 	}
 
@@ -901,8 +901,8 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* Get link status information from hardware */
 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
 		if (rc != HINIC_OK) {
-			link.link_speed = ETH_SPEED_NUM_NONE;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Get link status failed");
 			goto out;
 		}
@@ -1650,8 +1650,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	int err;
 
 	/* Enable or disable VLAN filter */
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
 			TRUE : FALSE;
 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
@@ -1672,8 +1672,8 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Enable or disable VLAN stripping */
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
 			TRUE : FALSE;
 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
 		if (err) {
@@ -1859,13 +1859,13 @@ static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
 	fc_conf->autoneg = nic_pause.auto_neg;
 
 	if (nic_pause.tx_pause && nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (nic_pause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else if (nic_pause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1879,14 +1879,14 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
 
 	nic_pause.auto_neg = fc_conf->autoneg;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		nic_pause.tx_pause = true;
 	else
 		nic_pause.tx_pause = false;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-		(fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+		(fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		nic_pause.rx_pause = true;
 	else
 		nic_pause.rx_pause = false;
@@ -1930,7 +1930,7 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err = 0;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_OK;
 	}
@@ -1951,14 +1951,14 @@ static int hinic_rss_hash_update(struct rte_eth_dev *dev,
 		}
 	}
 
-	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 
 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
 	if (err) {
@@ -1994,7 +1994,7 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 	struct nic_rss_type rss_type = {0};
 	int err;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
 		return HINIC_ERROR;
 	}
@@ -2015,15 +2015,15 @@ static int hinic_rss_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	rss_conf->rss_hf |=  rss_type.ipv4 ?
-		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
 	rss_conf->rss_hf |=  rss_type.ipv6 ?
-		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
-	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
-	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
-	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+	rss_conf->rss_hf |=  rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
 
 	return HINIC_OK;
 }
@@ -2053,7 +2053,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 	u16 i = 0;
 	u16 idx, shift;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
 		return HINIC_OK;
 
 	if (reta_size != NIC_RSS_INDIR_SIZE) {
@@ -2067,8 +2067,8 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
 
 	/* update rss indir_tbl */
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
@@ -2133,8 +2133,8 @@ static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
 	}
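
hinic_flow_ctrl_get() above maps the two pause booleans onto the renamed flow-control modes. A self-contained sketch of that mapping:

#include <stdbool.h>
#include <rte_ethdev.h>

/* Sketch: translate tx/rx pause state into an RTE_ETH_FC_* mode,
 * matching the hinic flow_ctrl_get logic above. */
static enum rte_eth_fc_mode
pause_to_fc_mode(bool tx_pause, bool rx_pause)
{
	if (tx_pause && rx_pause)
		return RTE_ETH_FC_FULL;
	if (tx_pause)
		return RTE_ETH_FC_TX_PAUSE;
	if (rx_pause)
		return RTE_ETH_FC_RX_PAUSE;
	return RTE_ETH_FC_NONE;
}
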
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 842399cc4cd8..d347afe9a6a9 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -504,14 +504,14 @@ static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
 {
 	u64 rss_hf = rss_conf->rss_hf;
 
-	rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
-	rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
-	rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
-	rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
-	rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
-	rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
-	rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
-	rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+	rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+	rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+	rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+	rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+	rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+	rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+	rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+	rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
 }
 
 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
@@ -588,8 +588,8 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 {
 	int err, i;
 
-	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+	if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 		nic_dev->num_rss = 0;
 		if (nic_dev->num_rq > 1) {
 			/* get rss template id */
@@ -599,7 +599,7 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 				PMD_DRV_LOG(WARNING, "Alloc rss template failed");
 				return err;
 			}
-			nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+			nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
 			for (i = 0; i < nic_dev->num_rq; i++)
 				hinic_add_rq_to_rx_queue_list(nic_dev, i);
 		}
@@ -610,12 +610,12 @@ static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
 
 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
 {
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (hinic_rss_template_free(nic_dev->hwdev,
 					    nic_dev->rss_tmpl_idx))
 			PMD_DRV_LOG(WARNING, "Free rss template failed");
 
-		nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+		nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
 	}
 }
 
@@ -641,7 +641,7 @@ int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
 	int ret = 0;
 
 	switch (dev_conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		ret = hinic_config_mq_rx_rss(nic_dev, on);
 		break;
 	default:
@@ -662,7 +662,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	int lro_wqe_num;
 	int buf_size;
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (rss_conf.rss_hf == 0) {
 			rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
 		} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
@@ -678,7 +678,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
 	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
@@ -687,7 +687,7 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 		goto rx_csum_ofl_err;
 
 	/* config lro */
-	lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+	lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
 			true : false;
 	max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
 	buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
@@ -726,7 +726,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
 {
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 
-	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+	if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
 		hinic_rss_deinit(nic_dev);
 		hinic_destroy_num_qps(nic_dev);
 	}
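
(For reference, not part of the patch: a minimal sketch of the
application-side configuration these hunks react to, written with the
renamed identifiers; the offload and hash selections are illustrative:)

    #include <rte_ethdev.h>

    static const struct rte_eth_conf port_conf = {
        .rxmode = {
            .mq_mode  = RTE_ETH_MQ_RX_RSS,
            .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
                        RTE_ETH_RX_OFFLOAD_TCP_LRO,
        },
        .rx_adv_conf.rss_conf = {
            .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
        },
    };
    /* ... rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */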
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 8a45f2d9fc50..5c303398b635 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -8,17 +8,17 @@
 #define HINIC_DEFAULT_RX_FREE_THRESH	32
 
 #define HINIC_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |\
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 enum rq_completion_fmt {
 	RQ_COMPLETE_SGE = 1
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 8753c340e790..3d0159d78778 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -1536,7 +1536,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 		if (dcb_rx_conf->nb_tcs == 0)
 			hw->dcb_info.pfc_en = 1; /* tc0 only */
@@ -1693,7 +1693,7 @@ hns3_update_queue_map_configure(struct hns3_adapter *hns)
 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		return 0;
 
 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
@@ -1713,22 +1713,22 @@ static void
 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
 {
 	switch (mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->requested_fc_mode = HNS3_FC_FULL;
 		break;
 	default:
 		hw->requested_fc_mode = HNS3_FC_NONE;
 		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
-			  "configured to RTE_FC_NONE", mode);
+			  "configured to RTE_ETH_FC_NONE", mode);
 		break;
 	}
 }
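
(Not part of the patch: the switch above maps the public flow-control modes
to hns3 internal ones. A minimal application-side sketch requesting full
flow control with the renamed constant; field values are placeholders:)

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    request_full_fc(uint16_t port_id)
    {
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        fc_conf.mode = RTE_ETH_FC_FULL;  /* was RTE_FC_FULL */
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    }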
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 6b89bcef97ba..9881659cebfc 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -60,29 +60,29 @@ enum hns3_evt_cause {
 };
 
 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
-	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
 
-	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
 			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
 };
@@ -500,8 +500,8 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	struct hns3_cmd_desc desc;
 	int ret;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
 		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
 		return -EINVAL;
 	}
@@ -514,10 +514,10 @@ hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
 	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
 
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
-	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
 		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
@@ -725,11 +725,11 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rte_spinlock_lock(&hw->lock);
 	rxmode = &dev->data->dev_conf.rxmode;
 	tmp_mask = (unsigned int)mask;
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* ignore vlan filter configuration during promiscuous mode */
 		if (!dev->data->promiscuous) {
 			/* Enable or disable VLAN filter */
-			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
 				 true : false;
 
 			ret = hns3_enable_vlan_filter(hns, enable);
@@ -742,9 +742,9 @@ hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
 		    true : false;
 
 		ret = hns3_en_hw_strip_rxvtag(hns, enable);
@@ -1118,7 +1118,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 		return ret;
 	}
 
-	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
 				       RTE_ETHER_TYPE_VLAN);
 	if (ret) {
 		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
@@ -1161,7 +1161,7 @@ hns3_restore_vlan_conf(struct hns3_adapter *hns)
 	if (!hw->data->promiscuous) {
 		/* restore vlan filter states */
 		offloads = hw->data->dev_conf.rxmode.offloads;
-		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
 		ret = hns3_enable_vlan_filter(hns, enable);
 		if (ret) {
 			hns3_err(hw, "failed to restore vlan rx filter conf, "
@@ -1204,7 +1204,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
 			  txmode->hw_vlan_reject_untagged);
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	ret = hns3_vlan_offload_set(dev, mask);
 	if (ret) {
 		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
@@ -2213,9 +2213,9 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 	int max_tc = 0;
 	int i;
 
-	if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
-	    (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
-	     tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+	if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
 		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
 			 rx_mq_mode, tx_mq_mode);
 		return -EOPNOTSUPP;
@@ -2223,7 +2223,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
-	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
 			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
 				 dcb_rx_conf->nb_tcs, pf->tc_max);
@@ -2232,7 +2232,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
 
 		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
 		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
-			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
 				 "nb_tcs(%d) != %d or %d in rx direction.",
 				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
 			return -EINVAL;
@@ -2400,11 +2400,11 @@ hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
 	 * configure link_speeds (default 0), which means auto-negotiation.
 	 * In this case, it should return success.
 	 */
-	if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
 	    hw->mac.support_autoneg == 0)
 		return 0;
 
-	if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
 		ret = hns3_check_port_speed(hw, link_speeds);
 		if (ret)
 			return ret;
@@ -2464,15 +2464,15 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		goto cfg_err;
 
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_setup_dcb(dev);
 		if (ret)
 			goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		hw->rss_dis_flag = false;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -2493,7 +2493,7 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -2600,15 +2600,15 @@ hns3_get_copper_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
-		speed_capa |= ETH_LINK_SPEED_10M;
+		speed_capa |= RTE_ETH_LINK_SPEED_10M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M_HD;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
-		speed_capa |= ETH_LINK_SPEED_100M;
+		speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 
 	return speed_capa;
 }
@@ -2619,19 +2619,19 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed)
 	uint32_t speed_capa = 0;
 
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
-		speed_capa |= ETH_LINK_SPEED_1G;
+		speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
-		speed_capa |= ETH_LINK_SPEED_10G;
+		speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	return speed_capa;
 }
@@ -2650,7 +2650,7 @@ hns3_get_speed_capa(struct hns3_hw *hw)
 			hns3_get_firber_port_speed_capa(mac->supported_speed);
 
 	if (mac->support_autoneg == 0)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -2676,40 +2676,40 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_KEEP_CRC |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
 	if (hns3_dev_get_support(hw, PTP))
-		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,
@@ -2793,7 +2793,7 @@ hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
 
 	ret = hns3_update_link_info(eth_dev);
 	if (ret)
-		hw->mac.link_status = ETH_LINK_DOWN;
+		hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	return ret;
 }
@@ -2806,29 +2806,29 @@ hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link->link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link->link_speed = ETH_SPEED_NUM_NONE;
+		new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link->link_duplex = mac->link_duplex;
-	new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link->link_autoneg = mac->link_autoneg;
 }
 
@@ -2848,8 +2848,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 	if (eth_dev->data->dev_started == 0) {
 		new_link.link_autoneg = mac->link_autoneg;
 		new_link.link_duplex = mac->link_duplex;
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
-		new_link.link_status = ETH_LINK_DOWN;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		new_link.link_status = RTE_ETH_LINK_DOWN;
 		goto out;
 	}
 
@@ -2861,7 +2861,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
 			break;
 		}
 
-		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+		if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
 			break;
 
 		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
@@ -3207,31 +3207,31 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
 {
 	switch (speed_cmd) {
 	case HNS3_CFG_SPEED_10M:
-		*speed = ETH_SPEED_NUM_10M;
+		*speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case HNS3_CFG_SPEED_100M:
-		*speed = ETH_SPEED_NUM_100M;
+		*speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case HNS3_CFG_SPEED_1G:
-		*speed = ETH_SPEED_NUM_1G;
+		*speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case HNS3_CFG_SPEED_10G:
-		*speed = ETH_SPEED_NUM_10G;
+		*speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case HNS3_CFG_SPEED_25G:
-		*speed = ETH_SPEED_NUM_25G;
+		*speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case HNS3_CFG_SPEED_40G:
-		*speed = ETH_SPEED_NUM_40G;
+		*speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case HNS3_CFG_SPEED_50G:
-		*speed = ETH_SPEED_NUM_50G;
+		*speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case HNS3_CFG_SPEED_100G:
-		*speed = ETH_SPEED_NUM_100G;
+		*speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case HNS3_CFG_SPEED_200G:
-		*speed = ETH_SPEED_NUM_200G;
+		*speed = RTE_ETH_SPEED_NUM_200G;
 		break;
 	default:
 		return -EINVAL;
@@ -3559,39 +3559,39 @@ hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
 	hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
 
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_10M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
 		break;
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
 		break;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
 		break;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
 		break;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
 			       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
 		break;
@@ -4254,14 +4254,14 @@ hns3_mac_init(struct hns3_hw *hw)
 	int ret;
 
 	pf->support_sfp_query = true;
-	mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+	mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
 		return ret;
 	}
 
-	mac->link_status = ETH_LINK_DOWN;
+	mac->link_status = RTE_ETH_LINK_DOWN;
 
 	return hns3_config_mtu(hw, pf->mps);
 }
@@ -4511,7 +4511,7 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	 * all packets coming in in the receiving direction.
 	 */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, false);
 		if (ret) {
 			hns3_err(hw, "failed to enable promiscuous mode due to "
@@ -4552,7 +4552,7 @@ hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	}
 	/* when promiscuous mode was disabled, restore the vlan filter status */
 	offloads = dev->data->dev_conf.rxmode.offloads;
-	if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = hns3_enable_vlan_filter(hns, true);
 		if (ret) {
 			hns3_err(hw, "failed to disable promiscuous mode due to"
@@ -4672,8 +4672,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 		mac_info->supported_speed =
 					rte_le_to_cpu_32(resp->supported_speed);
 		mac_info->support_autoneg = resp->autoneg_ability;
-		mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
-					: ETH_LINK_AUTONEG;
+		mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+					: RTE_ETH_LINK_AUTONEG;
 	} else {
 		mac_info->query_type = HNS3_DEFAULT_QUERY;
 	}
@@ -4684,8 +4684,8 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
 static uint8_t
 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
 {
-	if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
-		duplex = ETH_LINK_FULL_DUPLEX;
+	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+		duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	return duplex;
 }
@@ -4735,7 +4735,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 		return ret;
 
 	/* Do nothing if no SFP */
-	if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
 		return 0;
 
 	/*
@@ -4762,7 +4762,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw)
 
 	/* Config full duplex for SFP */
 	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
-				      ETH_LINK_FULL_DUPLEX);
+				      RTE_ETH_LINK_FULL_DUPLEX);
 }
 
 static void
@@ -4881,10 +4881,10 @@ hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
 	hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
 
 	/*
-	 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+	 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
 	 * when receiving frames. Otherwise, CRC will be stripped.
 	 */
-	if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
 	else
 		hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
@@ -4912,7 +4912,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret) {
 		hns3_err(hw, "get link status cmd failed %d", ret);
-		return ETH_LINK_DOWN;
+		return RTE_ETH_LINK_DOWN;
 	}
 
 	req = (struct hns3_link_status_cmd *)desc.data;
@@ -5094,19 +5094,19 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
 	struct hns3_mac *mac = &hw->mac;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		return HNS3_FIBER_LINK_SPEED_1G_BIT;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		return HNS3_FIBER_LINK_SPEED_10G_BIT;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		return HNS3_FIBER_LINK_SPEED_25G_BIT;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		return HNS3_FIBER_LINK_SPEED_40G_BIT;
-	case ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_50G:
 		return HNS3_FIBER_LINK_SPEED_50G_BIT;
-	case ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_100G:
 		return HNS3_FIBER_LINK_SPEED_100G_BIT;
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_200G:
 		return HNS3_FIBER_LINK_SPEED_200G_BIT;
 	default:
 		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
@@ -5344,20 +5344,20 @@ hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_10M:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_10M:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
 		break;
-	case ETH_LINK_SPEED_10M_HD:
+	case RTE_ETH_LINK_SPEED_10M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_100M:
+	case RTE_ETH_LINK_SPEED_100M:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
 		break;
-	case ETH_LINK_SPEED_100M_HD:
+	case RTE_ETH_LINK_SPEED_100M_HD:
 		speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
 		break;
-	case ETH_LINK_SPEED_1G:
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
 		break;
 	default:
@@ -5373,26 +5373,26 @@ hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
 {
 	uint32_t speed_bit;
 
-	switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
-	case ETH_LINK_SPEED_1G:
+	switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+	case RTE_ETH_LINK_SPEED_1G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
 		break;
-	case ETH_LINK_SPEED_10G:
+	case RTE_ETH_LINK_SPEED_10G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
 		break;
-	case ETH_LINK_SPEED_25G:
+	case RTE_ETH_LINK_SPEED_25G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
 		break;
-	case ETH_LINK_SPEED_40G:
+	case RTE_ETH_LINK_SPEED_40G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
 		break;
-	case ETH_LINK_SPEED_50G:
+	case RTE_ETH_LINK_SPEED_50G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
 		break;
-	case ETH_LINK_SPEED_100G:
+	case RTE_ETH_LINK_SPEED_100G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
 		break;
-	case ETH_LINK_SPEED_200G:
+	case RTE_ETH_LINK_SPEED_200G:
 		speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
 		break;
 	default:
@@ -5427,28 +5427,28 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
 static inline uint32_t
 hns3_get_link_speed(uint32_t link_speeds)
 {
-	uint32_t speed = ETH_SPEED_NUM_NONE;
-
-	if (link_speeds & ETH_LINK_SPEED_10M ||
-	    link_speeds & ETH_LINK_SPEED_10M_HD)
-		speed = ETH_SPEED_NUM_10M;
-	if (link_speeds & ETH_LINK_SPEED_100M ||
-	    link_speeds & ETH_LINK_SPEED_100M_HD)
-		speed = ETH_SPEED_NUM_100M;
-	if (link_speeds & ETH_LINK_SPEED_1G)
-		speed = ETH_SPEED_NUM_1G;
-	if (link_speeds & ETH_LINK_SPEED_10G)
-		speed = ETH_SPEED_NUM_10G;
-	if (link_speeds & ETH_LINK_SPEED_25G)
-		speed = ETH_SPEED_NUM_25G;
-	if (link_speeds & ETH_LINK_SPEED_40G)
-		speed = ETH_SPEED_NUM_40G;
-	if (link_speeds & ETH_LINK_SPEED_50G)
-		speed = ETH_SPEED_NUM_50G;
-	if (link_speeds & ETH_LINK_SPEED_100G)
-		speed = ETH_SPEED_NUM_100G;
-	if (link_speeds & ETH_LINK_SPEED_200G)
-		speed = ETH_SPEED_NUM_200G;
+	uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+	if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+		speed = RTE_ETH_SPEED_NUM_10M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+	    link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+		speed = RTE_ETH_SPEED_NUM_100M;
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+		speed = RTE_ETH_SPEED_NUM_1G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+		speed = RTE_ETH_SPEED_NUM_10G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+		speed = RTE_ETH_SPEED_NUM_25G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+		speed = RTE_ETH_SPEED_NUM_40G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+		speed = RTE_ETH_SPEED_NUM_50G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+		speed = RTE_ETH_SPEED_NUM_100G;
+	if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+		speed = RTE_ETH_SPEED_NUM_200G;
 
 	return speed;
 }
@@ -5456,11 +5456,11 @@ hns3_get_link_speed(uint32_t link_speeds)
 static uint8_t
 hns3_get_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-	    (link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+	    (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 static int
@@ -5594,9 +5594,9 @@ hns3_apply_link_speed(struct hns3_hw *hw)
 	struct hns3_set_link_speed_cfg cfg;
 
 	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
-	cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
-			ETH_LINK_AUTONEG : ETH_LINK_FIXED;
-	if (cfg.autoneg != ETH_LINK_AUTONEG) {
+	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+			RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
 		cfg.speed = hns3_get_link_speed(conf->link_speeds);
 		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
 	}
@@ -5869,7 +5869,7 @@ hns3_do_stop(struct hns3_adapter *hns)
 	ret = hns3_cfg_mac_mode(hw, false);
 	if (ret)
 		return ret;
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
@@ -6080,17 +6080,17 @@ hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	current_mode = hns3_get_current_fc_mode(dev);
 	switch (current_mode) {
 	case HNS3_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case HNS3_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case HNS3_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case HNS3_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	}
 
@@ -6236,7 +6236,7 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	int i;
 
 	rte_spinlock_lock(&hw->lock);
-	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = pf->local_max_tc;
 	else
 		dcb_info->nb_tcs = 1;
@@ -6536,7 +6536,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 		hns3_update_linkstatus_and_event(hw, false);
@@ -6826,7 +6826,7 @@ get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
 	 * in device of link speed
 	 * below 10 Gbps.
 	 */
-	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
 		*state = 0;
 		return 0;
 	}
@@ -6858,7 +6858,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
 	 * configured FEC mode is returned.
 	 * If link is up, current FEC mode is returned.
 	 */
-	if (hw->mac.link_status == ETH_LINK_DOWN) {
+	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
 		ret = get_current_fec_auto_state(hw, &auto_state);
 		if (ret)
 			return ret;
@@ -6957,12 +6957,12 @@ get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
 	uint32_t cur_capa;
 
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		cur_capa = fec_capa[1].capa;
 		break;
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		cur_capa = fec_capa[0].capa;
 		break;
 	default:
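
(Illustration, not part of the patch: the link_speeds handling touched above
is driven by the application's port configuration. A sketch requesting a
fixed 25G link, assuming a port that supports it:)

    #include <rte_ethdev.h>

    static int
    configure_fixed_25g(uint16_t port_id)
    {
        struct rte_eth_conf conf = { 0 };

        /* RTE_ETH_LINK_SPEED_AUTONEG (0) would request autonegotiation */
        conf.link_speeds = RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_FIXED;
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }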
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index fa08fadc9497..eb3470535363 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -190,10 +190,10 @@ struct hns3_mac {
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 	uint8_t media_type;
 	uint8_t phy_addr;
-	uint8_t link_duplex  : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
-	uint8_t link_status  : 1; /* ETH_LINK_[DOWN/UP] */
-	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
+	uint8_t link_duplex  : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint8_t link_status  : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;      /* RTE_ETH_SPEED_NUM_ */
 	/*
 	 * Some firmware versions support only the SFP speed query. In addition
 	 * to the SFP speed query, some firmware supports the query of the speed
@@ -1079,9 +1079,9 @@ static inline uint64_t
 hns3_txvlan_cap_get(struct hns3_hw *hw)
 {
 	if (hw->port_base_vlan_cfg.state)
-		return DEV_TX_OFFLOAD_VLAN_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	else
-		return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+		return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 }
 
 #endif /* _HNS3_ETHDEV_H_ */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 8e5df05aa285..c0c1f1c4c107 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -807,15 +807,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	hw->adapter_state = HNS3_NIC_CONFIGURING;
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
 		ret = -EINVAL;
 		goto cfg_err;
 	}
 
 	/* When RSS is not configured, redirect the packet queue 0 */
-	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -832,7 +832,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		goto cfg_err;
 
 	/* config hardware GRO */
-	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		goto cfg_err;
@@ -935,32 +935,32 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
-	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_TCP_CKSUM |
-				 DEV_RX_OFFLOAD_SCTP_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				 DEV_RX_OFFLOAD_SCATTER |
-				 DEV_RX_OFFLOAD_VLAN_STRIP |
-				 DEV_RX_OFFLOAD_VLAN_FILTER |
-				 DEV_RX_OFFLOAD_RSS_HASH |
-				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_IPV4_CKSUM |
-				 DEV_TX_OFFLOAD_TCP_CKSUM |
-				 DEV_TX_OFFLOAD_UDP_CKSUM |
-				 DEV_TX_OFFLOAD_SCTP_CKSUM |
-				 DEV_TX_OFFLOAD_MULTI_SEGS |
-				 DEV_TX_OFFLOAD_TCP_TSO |
-				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				 RTE_ETH_RX_OFFLOAD_SCATTER |
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
+				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
+	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1640,10 +1640,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	tmp_mask = (unsigned int)mask;
 
-	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN filter */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = hns3vf_en_vlan_filter(hw, true);
 		else
 			ret = hns3vf_en_vlan_filter(hw, false);
@@ -1653,10 +1653,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	/* Vlan stripping setting */
-	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rte_spinlock_lock(&hw->lock);
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
 		else
 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1724,7 +1724,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
 	int ret;
 
 	dev_conf = &hw->data->dev_conf;
-	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
 								   : false;
 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
 	if (ret)
@@ -1749,8 +1749,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
 	}
 
 	/* Apply vlan offload setting */
-	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK);
+	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
 
@@ -2059,7 +2059,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	/*
 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2218,31 +2218,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
-	case ETH_SPEED_NUM_10M:
-	case ETH_SPEED_NUM_100M:
-	case ETH_SPEED_NUM_1G:
-	case ETH_SPEED_NUM_10G:
-	case ETH_SPEED_NUM_25G:
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_50G:
-	case ETH_SPEED_NUM_100G:
-	case ETH_SPEED_NUM_200G:
+	case RTE_ETH_SPEED_NUM_10M:
+	case RTE_ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_50G:
+	case RTE_ETH_SPEED_NUM_100G:
+	case RTE_ETH_SPEED_NUM_200G:
 		if (mac->link_status)
 			new_link.link_speed = mac->link_speed;
 		break;
 	default:
 		if (mac->link_status)
-			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	}
 
 	if (!mac->link_status)
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	new_link.link_duplex = mac->link_duplex;
-	new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg =
-	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -2570,11 +2570,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 		 * Make sure call update link status before hns3vf_stop_poll_job
 		 * because update link status depend on polling job exist.
 		 */
-		hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
 					  hw->mac.link_duplex);
 		hns3vf_stop_poll_job(eth_dev);
 	}
-	hw->mac.link_status = ETH_LINK_DOWN;
+	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
 	rte_wmb();
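
(For reference only: the link state the hunks above publish is read back by
applications with the renamed RTE_ETH_LINK_*/RTE_ETH_SPEED_NUM_* values. A
minimal sketch:)

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    report_link(uint16_t port_id)
    {
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
            return;
        if (link.link_status == RTE_ETH_LINK_UP)
            printf("port %u: %s, %s duplex\n", port_id,
                   rte_eth_link_speed_to_str(link.link_speed),
                   link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                   "full" : "half");
    }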
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 38a2ee58a651..da6918fddda3 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1298,10 +1298,10 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
 	 * Kunpeng930 and future kunpeng series support to use src/dst port
 	 * fields to RSS hash for IPv6 SCTP packet type.
 	 */
-	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	    (rss->types & ETH_RSS_IP ||
+	if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+	    (rss->types & RTE_ETH_RSS_IP ||
 	    (!hw->rss_info.ipv6_sctp_offload_supported &&
-	    rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+	    rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 		return false;
 
 	return true;
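
(Illustration, not part of the patch: the check above validates the types
field of an rte_flow RSS action. A sketch of such an action using the
renamed SRC/DST_ONLY modifiers; the queue list is an assumption:)

    #include <rte_common.h>
    #include <rte_flow.h>

    static const uint16_t rss_queues[] = { 0, 1, 2, 3 };  /* assumption */
    static const struct rte_flow_action_rss rss_action = {
        .types = RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
        .queue_num = RTE_DIM(rss_queues),
        .queue = rss_queues,
    };
    /* used as { RTE_FLOW_ACTION_TYPE_RSS, &rss_action } in an action list */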
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
index 5dfe68cc4dbd..9a829d7011ad 100644
--- a/drivers/net/hns3/hns3_ptp.c
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -21,7 +21,7 @@ hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		return 0;
 
 	ret = rte_mbuf_dyn_rx_timestamp_register
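
(Not part of the patch: the dynfield registration above only runs when the
application enables the Rx timestamp offload at configure time. A minimal
sketch with the renamed flag:)

    #include <rte_ethdev.h>

    static int
    enable_rx_timestamp(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
        struct rte_eth_conf conf = { 0 };

        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }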
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index 3a81e90e0911..85495bbe89d9 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -76,69 +76,69 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_tuple_table[] = {
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
-	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
 };
 
@@ -146,44 +146,44 @@ static const struct {
 	uint64_t rss_types;
 	uint64_t rss_field;
 } hns3_set_rss_types[] = {
-	{ ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+	{ RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
-	{ ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+	{ RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
 };
@@ -365,10 +365,10 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
 	 * When user does not specify the following types or a combination of
 	 * the following types, it enables all fields for the supported RSS
 	 * types. the following types as:
-	 * - ETH_RSS_L3_SRC_ONLY
-	 * - ETH_RSS_L3_DST_ONLY
-	 * - ETH_RSS_L4_SRC_ONLY
-	 * - ETH_RSS_L4_DST_ONLY
+	 * - RTE_ETH_RSS_L3_SRC_ONLY
+	 * - RTE_ETH_RSS_L3_DST_ONLY
+	 * - RTE_ETH_RSS_L4_SRC_ONLY
+	 * - RTE_ETH_RSS_L4_DST_ONLY
 	 */
 	if (fields_count == 0) {
 		for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
@@ -520,8 +520,8 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev,
 	memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl,
 	       sizeof(rss_cfg->rss_indirection_tbl));
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
 			rte_spinlock_unlock(&hw->lock);
 			hns3_err(hw, "queue id(%u) set to redirection table "
@@ -572,8 +572,8 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	rte_spinlock_lock(&hw->lock);
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] =
 						rss_cfg->rss_indirection_tbl[i];
@@ -692,7 +692,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	}
 
 	/* When RSS is off, redirect the packet queue 0 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
 		hns3_rss_uninit(hns);
 
 	/* Configure RSS hash algorithm and hash key offset */
@@ -709,7 +709,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	 * When RSS is off, it doesn't need to configure rss redirection table
 	 * to hardware.
 	 */
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
 					       hw->rss_ind_tbl_size);
 		if (ret)
@@ -723,7 +723,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 	return ret;
 
 rss_indir_table_uninit:
-	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+	if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
 		ret1 = hns3_rss_reset_indir_table(hw);
 		if (ret1 != 0)
 			return ret;
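
(For reference only: the tuple table above translates application hash
selections to register fields. A hedged sketch of selecting source-IP-only
hashing through the public API:)

    #include <rte_ethdev.h>

    static int
    hash_on_src_ip_only(uint16_t port_id)
    {
        struct rte_eth_rss_conf rss_conf = {
            .rss_key = NULL,  /* keep the configured key */
            .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                      RTE_ETH_RSS_L3_SRC_ONLY,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
    }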
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 996083b88b25..6f153a1b7bfb 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -8,20 +8,20 @@
 #include <rte_flow.h>
 
 #define HNS3_ETH_RSS_SUPPORT ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L3_SRC_ONLY | \
-	ETH_RSS_L3_DST_ONLY | \
-	ETH_RSS_L4_SRC_ONLY | \
-	ETH_RSS_L4_DST_ONLY)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L3_SRC_ONLY | \
+	RTE_ETH_RSS_L3_DST_ONLY | \
+	RTE_ETH_RSS_L4_SRC_ONLY | \
+	RTE_ETH_RSS_L4_DST_ONLY)
 
 #define HNS3_RSS_IND_TBL_SIZE	512 /* The size of hash lookup table */
 #define HNS3_RSS_IND_TBL_SIZE_MAX 2048
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 602548a4f25b..920ee8ceeab9 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1924,7 +1924,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* CRC len set here is used for amending packet length */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1969,7 +1969,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
 						 rxq->rx_buf_len);
 	}
 
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 	    dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
 		dev->data->scattered_rx = true;
 }
@@ -2845,7 +2845,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = !dev->data->scattered_rx &&
-			 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+			 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
 
 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_recv_pkts_vec;
@@ -3139,7 +3139,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
 	int ret;
 
 	offloads = hw->data->dev_conf.rxmode.offloads;
-	gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+	gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
 	ret = hns3_config_gro(hw, gro_en);
 	if (ret)
 		hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
@@ -4291,7 +4291,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return false;
 
-	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
 }
 
 static bool
@@ -4303,16 +4303,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 	return true;
 #else
 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
-		DEV_TX_OFFLOAD_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_CKSUM | \
-		DEV_TX_OFFLOAD_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_SCTP_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
-		DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO | \
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
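
The checksum/TSO mask above mirrors what applications should do on their side: check the advertised capability before requesting an offload. A small illustrative sketch; enable_tx_cksum() is a made-up name, assuming the standard rte_eth_dev_info_get() flow:

    #include <errno.h>
    #include <rte_ethdev.h>

    /* Illustrative only: verify a Tx offload is advertised before enabling it. */
    static int
    enable_tx_cksum(uint16_t port_id, struct rte_eth_conf *conf)
    {
    	struct rte_eth_dev_info info;
    	int ret;

    	ret = rte_eth_dev_info_get(port_id, &info);
    	if (ret != 0)
    		return ret;

    	if ((info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0)
    		return -ENOTSUP;

    	conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
    	return 0;
    }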
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index c8229e9076b5..dfea5d5b4c2f 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -307,7 +307,7 @@ struct hns3_rx_queue {
 	uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
 	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */
 
-	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+	/* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
 	/*
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index ff434d2d33ed..455110361aac 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -22,8 +22,8 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
-	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
-	if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	/* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		return -ENOTSUP;
 
 	return 0;
@@ -228,10 +228,10 @@ hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
 int
 hns3_rx_check_vec_support(struct rte_eth_dev *dev)
 {
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
-				 DEV_RX_OFFLOAD_VLAN;
+	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				 RTE_ETH_RX_OFFLOAD_VLAN;
 
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	if (hns3_dev_get_support(hw, PTP))
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0a4db0891d4a..293df887bf7c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1629,7 +1629,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 
 	/* Set the global registers with default ether type value */
 	if (!pf->support_multi_driver) {
-		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					 RTE_ETHER_TYPE_VLAN);
 		if (ret != I40E_SUCCESS) {
 			PMD_INIT_LOG(ERR,
@@ -1896,8 +1896,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Only legacy filter API needs the following fdir config. So when the
 	 * legacy filter API is deprecated, the following codes should also be
@@ -1931,13 +1931,13 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	 *  number, which will be available after rx_queue_setup(). dev_start()
 	 *  function is good to place RSS setup.
 	 */
-	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
 		ret = i40e_vmdq_setup(dev);
 		if (ret)
 			goto err;
 	}
 
-	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
 		ret = i40e_dcb_setup(dev);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
@@ -2214,17 +2214,17 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 {
 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
 
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed |= I40E_LINK_SPEED_40GB;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed |= I40E_LINK_SPEED_25GB;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed |= I40E_LINK_SPEED_20GB;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed |= I40E_LINK_SPEED_10GB;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed |= I40E_LINK_SPEED_1GB;
-	if (link_speeds & ETH_LINK_SPEED_100M)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
 		link_speed |= I40E_LINK_SPEED_100MB;
 
 	return link_speed;
@@ -2332,13 +2332,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
 		     I40E_AQ_PHY_LINK_ENABLED;
 
-	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
-		conf->link_speeds = ETH_LINK_SPEED_40G |
-				    ETH_LINK_SPEED_25G |
-				    ETH_LINK_SPEED_20G |
-				    ETH_LINK_SPEED_10G |
-				    ETH_LINK_SPEED_1G |
-				    ETH_LINK_SPEED_100M;
+	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+				    RTE_ETH_LINK_SPEED_25G |
+				    RTE_ETH_LINK_SPEED_20G |
+				    RTE_ETH_LINK_SPEED_10G |
+				    RTE_ETH_LINK_SPEED_1G |
+				    RTE_ETH_LINK_SPEED_100M;
 
 		abilities |= I40E_AQ_PHY_AN_ENABLED;
 	} else {
@@ -2876,34 +2876,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 	/* Parse the link status */
 	switch (link_speed) {
 	case I40E_REG_SPEED_0:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_REG_SPEED_1:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_REG_SPEED_2:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_2_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		else
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_REG_SPEED_3:
 		if (hw->mac.type == I40E_MAC_X722) {
-			link->link_speed = ETH_SPEED_NUM_5G;
+			link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		} else {
 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
 
 			if (reg_val & I40E_REG_MACC_25GB)
-				link->link_speed = ETH_SPEED_NUM_25G;
+				link->link_speed = RTE_ETH_SPEED_NUM_25G;
 			else
-				link->link_speed = ETH_SPEED_NUM_40G;
+				link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		}
 		break;
 	case I40E_REG_SPEED_4:
 		if (hw->mac.type == I40E_MAC_X722)
-			link->link_speed = ETH_SPEED_NUM_10G;
+			link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		else
-			link->link_speed = ETH_SPEED_NUM_20G;
+			link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -2930,8 +2930,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 		status = i40e_aq_get_link_info(hw, enable_lse,
 						&link_status, NULL);
 		if (unlikely(status != I40E_SUCCESS)) {
-			link->link_speed = ETH_SPEED_NUM_NONE;
-			link->link_duplex = ETH_LINK_FULL_DUPLEX;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			return;
 		}
@@ -2946,28 +2946,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case I40E_LINK_SPEED_100MB:
-		link->link_speed = ETH_SPEED_NUM_100M;
+		link->link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		link->link_speed = ETH_SPEED_NUM_20G;
+		link->link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case I40E_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case I40E_LINK_SPEED_40GB:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	default:
 		if (link->link_status)
-			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		else
-			link->link_speed = ETH_SPEED_NUM_NONE;
+			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 }
@@ -2984,9 +2984,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	memset(&link, 0, sizeof(link));
 
 	/* i40e uses full duplex only */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	if (!wait_to_complete && !enable_lse)
 		update_link_reg(hw, &link);
@@ -3720,33 +3720,33 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -3805,7 +3805,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
 		/* For XL710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_40G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
 		dev_info->default_rxportconf.nb_queues = 2;
 		dev_info->default_txportconf.nb_queues = 2;
 		if (dev->data->nb_rx_queues == 1)
@@ -3819,17 +3819,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
 		/* For XXV710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_25G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
 		dev_info->default_rxportconf.ring_size = 256;
 		dev_info->default_txportconf.ring_size = 256;
 	} else {
 		/* For X710 */
-		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 		dev_info->default_rxportconf.nb_queues = 1;
 		dev_info->default_txportconf.nb_queues = 1;
-		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
 			dev_info->default_rxportconf.ring_size = 512;
 			dev_info->default_txportconf.ring_size = 256;
 		} else {
@@ -3868,7 +3868,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
 	int ret;
 
 	if (qinq) {
-		if (vlan_type == ETH_VLAN_TYPE_OUTER)
+		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 			reg_id = 2;
 	}
 
@@ -3915,12 +3915,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		   DEV_RX_OFFLOAD_VLAN_EXTEND;
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
-	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
-	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
-	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
 		PMD_DRV_LOG(ERR,
 			    "Unsupported vlan type.");
 		return -EINVAL;
@@ -3934,12 +3934,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	/* 802.1ad frames ability is added in NVM API 1.7*/
 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
 		if (qinq) {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->first_tag = rte_cpu_to_le_16(tpid);
-			else if (vlan_type == ETH_VLAN_TYPE_INNER)
+			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		} else {
-			if (vlan_type == ETH_VLAN_TYPE_OUTER)
+			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
 				hw->second_tag = rte_cpu_to_le_16(tpid);
 		}
 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
@@ -3998,37 +3998,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					   RTE_ETHER_TYPE_VLAN);
-			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
 					   RTE_ETHER_TYPE_VLAN);
 		}
 		else
 			i40e_vsi_config_double_vlan(vsi, FALSE);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
 		/* Enable or disable outer VLAN stripping */
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
@@ -4111,17 +4111,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	 /* Return current mode according to actual setting*/
 	switch (hw->fc.current_mode) {
 	case I40E_FC_FULL:
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	case I40E_FC_TX_PAUSE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case I40E_FC_RX_PAUSE:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case I40E_FC_NONE:
 	default:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	};
 
 	return 0;
@@ -4137,10 +4137,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	struct i40e_hw *hw;
 	struct i40e_pf *pf;
 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
-		[RTE_FC_NONE] = I40E_FC_NONE,
-		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
-		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
-		[RTE_FC_FULL] = I40E_FC_FULL
+		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
+		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+		[RTE_ETH_FC_FULL] = I40E_FC_FULL
 	};
 
 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
@@ -4287,7 +4287,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
@@ -4440,7 +4440,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4456,8 +4456,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4483,7 +4483,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	int ret;
 
 	if (reta_size != lut_size ||
-		reta_size > ETH_RSS_RETA_SIZE_512) {
+		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
 		PMD_DRV_LOG(ERR,
 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
 			reta_size, lut_size);
@@ -4500,8 +4500,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 	if (ret)
 		goto out;
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -4818,7 +4818,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
 				hw->func_caps.num_vsis - vsi_count);
 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-				ETH_64_POOLS);
+				RTE_ETH_64_POOLS);
 			if (pf->max_nb_vmdq_vsi) {
 				pf->flags |= I40E_FLAG_VMDQ;
 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
@@ -6104,10 +6104,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	int mask = 0;
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK |
-	       ETH_QINQ_STRIP_MASK |
-	       ETH_VLAN_FILTER_MASK |
-	       ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK |
+	       RTE_ETH_QINQ_STRIP_MASK |
+	       RTE_ETH_VLAN_FILTER_MASK |
+	       RTE_ETH_VLAN_EXTEND_MASK;
 	ret = i40e_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
@@ -6236,9 +6236,9 @@ i40e_pf_setup(struct i40e_pf *pf)
 
 	/* Configure filter control */
 	memset(&settings, 0, sizeof(settings));
-	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
-	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 	else {
 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
@@ -7098,7 +7098,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
 {
 	uint32_t vid_idx, vid_bit;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return 0;
 
 	vid_idx = I40E_VFTA_IDX(vlan_id);
@@ -7133,7 +7133,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
 	int ret;
 
-	if (vlan_id > ETH_VLAN_ID_MAX)
+	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
 		return;
 
 	i40e_store_vlan_filter(vsi, vlan_id, on);
@@ -7727,25 +7727,25 @@ static int
 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
 {
 	switch (filter_type) {
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_IMAC_TENID:
+	case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
 		break;
-	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_IMAC:
+	case RTE_ETH_TUNNEL_FILTER_IMAC:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 		break;
-	case ETH_TUNNEL_FILTER_OIP:
+	case RTE_ETH_TUNNEL_FILTER_OIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
 		break;
-	case ETH_TUNNEL_FILTER_IIP:
+	case RTE_ETH_TUNNEL_FILTER_IIP:
 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 		break;
 	default:
@@ -8711,16 +8711,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8746,12 +8746,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -1;
 		break;
@@ -8843,7 +8843,7 @@ int
 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->adapter->hw;
-	uint8_t lut[ETH_RSS_RETA_SIZE_512];
+	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 	uint32_t i;
 	int num;
 
@@ -8851,7 +8851,7 @@ i40e_pf_reset_rss_reta(struct i40e_pf *pf)
 	 * configured. It's necessary to calculate the actual PF
 	 * queues that are configured.
 	 */
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		num = i40e_pf_calc_configured_queues_num(pf);
 	else
 		num = pf->dev_data->nb_rx_queues;
@@ -8930,7 +8930,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
-	    !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		return 0;
 
 	hw = I40E_PF_TO_HW(pf);
@@ -10267,16 +10267,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_40G:
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_25G:
 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
 		break;
@@ -10504,7 +10504,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev,
 	else
 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
 
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		dcb_cfg->pfc.willing = 0;
 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 		dcb_cfg->pfc.pfcenable = *tc_map;
@@ -11012,7 +11012,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint16_t bsf, tc_mapping;
 	int i, j = 0;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
 	else
 		dcb_info->nb_tcs = 1;
@@ -11060,7 +11060,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
 		}
 		j++;
-	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
 	return 0;
 }
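
The reta update/query loops above split the table into 64-entry groups via the renamed RTE_ETH_RETA_GROUP_SIZE. The application-side counterpart looks roughly like this; fill_reta() is hypothetical and assumes reta_size is the value reported in dev_info and is at most 512:

    #include <string.h>
    #include <rte_ethdev.h>

    /* Sketch: spread 'reta_size' redirection entries round-robin
     * over 'nb_queues' Rx queues. */
    static int
    fill_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
    {
    	struct rte_eth_rss_reta_entry64 conf[RTE_ETH_RSS_RETA_SIZE_512 /
    					     RTE_ETH_RETA_GROUP_SIZE];
    	uint16_t i, idx, shift;

    	memset(conf, 0, sizeof(conf));
    	for (i = 0; i < reta_size; i++) {
    		idx = i / RTE_ETH_RETA_GROUP_SIZE;
    		shift = i % RTE_ETH_RETA_GROUP_SIZE;
    		conf[idx].mask |= 1ULL << shift;
    		conf[idx].reta[shift] = i % nb_queues;
    	}

    	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
    }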
 
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1d57b9617e66..d8042abbd9be 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -147,17 +147,17 @@ enum i40e_flxpld_layer_idx {
 		       I40E_FLAG_RSS_AQ_CAPABLE)
 
 #define I40E_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /* All bits of RSS hash enable for X722*/
 #define I40E_RSS_HENA_ALL_X722 ( \
@@ -1063,7 +1063,7 @@ struct i40e_rte_flow_rss_conf {
 	uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
 		     I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		    sizeof(uint32_t)];		/**< Hash key. */
-	uint16_t queue[ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
+	uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512];	/**< Queues indices to use. */
 
 	bool symmetric_enable;		/**< true, if enable symmetric */
 	uint64_t config_pctypes;	/**< All PCTYPES with the flow  */
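
As noted in the commit message, the old names stay available through compatibility macros until the next LTS, so driver-external code like the mask above keeps compiling either way. Their shape is roughly the following; an illustrative subset, the full list lives in rte_ethdev.h:

    /* Illustrative subset of the compatibility aliases; one such
     * define exists per old name. */
    #define ETH_MQ_RX_RSS_FLAG       RTE_ETH_MQ_RX_RSS_FLAG
    #define DEV_RX_OFFLOAD_KEEP_CRC  RTE_ETH_RX_OFFLOAD_KEEP_CRC
    #define ETH_RSS_IPV4             RTE_ETH_RSS_IPV4
    #define ETH_VLAN_STRIP_MASK      RTE_ETH_VLAN_STRIP_MASK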
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e41a84f1d737..9acaa1875105 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2015,7 +2015,7 @@ i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int qinq = dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_VLAN_EXTEND;
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
@@ -3601,13 +3601,13 @@ i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
 }
 
 static uint16_t i40e_supported_tunnel_filter_types[] = {
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
-	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
-	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
-	ETH_TUNNEL_FILTER_IMAC,
-	ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
+	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
+	RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+	RTE_ETH_TUNNEL_FILTER_IMAC,
+	RTE_ETH_TUNNEL_FILTER_IMAC,
 };
 
 static int
@@ -3697,12 +3697,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 			break;
@@ -3724,7 +3724,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -3798,7 +3798,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					   vxlan_spec->vni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			vxlan_flag = 1;
@@ -3927,12 +3927,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   RTE_ETHER_ADDR_LEN);
-					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
 
@@ -3955,7 +3955,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
 					      I40E_VLAN_TCI_MASK;
-				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
@@ -4050,7 +4050,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 					   nvgre_spec->tni, 3);
 				filter->tenant_id =
 					rte_be_to_cpu_32(tenant_id_be);
-				filter_type |= ETH_TUNNEL_FILTER_TENID;
+				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
 			}
 
 			nvgre_flag = 1;
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
index 5da3d187076e..8962e9d97aa7 100644
--- a/drivers/net/i40e/i40e_hash.c
+++ b/drivers/net/i40e/i40e_hash.c
@@ -105,47 +105,47 @@ struct i40e_hash_map_rss_inset {
 
 const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
 	/* IPv4 */
-	{ ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
-	{ ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+	{ RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
 
-	{ ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* IPv6 */
-	{ ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
-	{ ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+	{ RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_OTHER,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	  I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
 
-	{ ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
-	{ ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+	{ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
 	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
 
 	/* Port */
-	{ ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+	{ RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
 
 	/* Ether */
-	{ ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
-	{ ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+	{ RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+	{ RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
 
 	/* VLAN */
-	{ ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
-	{ ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+	{ RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+	{ RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
 };
 
 #define I40E_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
@@ -208,30 +208,30 @@ struct i40e_hash_match_pattern {
 #define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
 	pattern, rss_mask, true, cus_pctype }
 
-#define I40E_HASH_L2_RSS_MASK		(ETH_RSS_VLAN | ETH_RSS_ETH | \
-					ETH_RSS_L2_SRC_ONLY | \
-					ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK		(RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+					RTE_ETH_RSS_L2_SRC_ONLY | \
+					RTE_ETH_RSS_L2_DST_ONLY)
 
 #define I40E_HASH_L23_RSS_MASK		(I40E_HASH_L2_RSS_MASK | \
-					ETH_RSS_L3_SRC_ONLY | \
-					ETH_RSS_L3_DST_ONLY)
+					RTE_ETH_RSS_L3_SRC_ONLY | \
+					RTE_ETH_RSS_L3_DST_ONLY)
 
-#define I40E_HASH_IPV4_L23_RSS_MASK	(ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK	(ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK	(RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK	(RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
 
 #define I40E_HASH_L234_RSS_MASK		(I40E_HASH_L23_RSS_MASK | \
-					ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
-					ETH_RSS_L4_DST_ONLY)
+					RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+					RTE_ETH_RSS_L4_DST_ONLY)
 
-#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK	(I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
 
-#define I40E_HASH_L4_TYPES		(ETH_RSS_NONFRAG_IPV4_TCP | \
-					ETH_RSS_NONFRAG_IPV4_UDP | \
-					ETH_RSS_NONFRAG_IPV4_SCTP | \
-					ETH_RSS_NONFRAG_IPV6_TCP | \
-					ETH_RSS_NONFRAG_IPV6_UDP | \
-					ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES		(RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* Current supported patterns and RSS types.
  * All items that have the same pattern types are together.
@@ -239,72 +239,72 @@ struct i40e_hash_match_pattern {
 static const struct i40e_hash_match_pattern match_patterns[] = {
 	/* Ether */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
-			      ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+			      RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
 			      I40E_FILTER_PCTYPE_L2_PAYLOAD),
 
 	/* IPv4 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV4),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
-			      ETH_RSS_NONFRAG_IPV4_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
 			      I40E_HASH_IPV4_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
-			      ETH_RSS_NONFRAG_IPV4_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
-			      ETH_RSS_NONFRAG_IPV4_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_UDP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
-			      ETH_RSS_NONFRAG_IPV4_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
 			      I40E_HASH_IPV4_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
 
 	/* IPv6 */
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
-			      ETH_RSS_NONFRAG_IPV6_OTHER |
+			      RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
 			      I40E_HASH_IPV6_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_FRAG,
-			      ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
+			      RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
 			      I40E_FILTER_PCTYPE_FRAG_IPV6),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
-			      ETH_RSS_NONFRAG_IPV6_TCP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_TCP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
-			      ETH_RSS_NONFRAG_IPV6_UDP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_UDP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
 
 	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
-			      ETH_RSS_NONFRAG_IPV6_SCTP |
+			      RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
 			      I40E_HASH_IPV6_L234_RSS_MASK,
 			      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
 
 	/* ESP */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
-				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+				  RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
 
 	/* GTPC */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
@@ -319,27 +319,27 @@ static const struct i40e_hash_match_pattern match_patterns[] = {
 				  I40E_HASH_IPV4_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
 				  I40E_HASH_IPV6_L234_RSS_MASK,
 				  I40E_CUSTOMIZED_GTPU),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
-				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+				  RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
 
 	/* L2TPV3 */
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
 	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
-				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+				  RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
 
 	/* AH */
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV4),
-	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
 				  I40E_CUSTOMIZED_AH_IPV6),
 };
 
@@ -575,29 +575,29 @@ i40e_hash_get_inset(uint64_t rss_types)
 	/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
 	 * it is the same case as none of them are added.
 	 */
-	mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
-	if (mask == ETH_RSS_L2_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
 		inset &= ~I40E_INSET_DMAC;
-	else if (mask == ETH_RSS_L2_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
 		inset &= ~I40E_INSET_SMAC;
 
-	mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
-	if (mask == ETH_RSS_L3_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
 		inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
-	else if (mask == ETH_RSS_L3_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
 		inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
 
-	mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-	if (mask == ETH_RSS_L4_SRC_ONLY)
+	mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+	if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
 		inset &= ~I40E_INSET_DST_PORT;
-	else if (mask == ETH_RSS_L4_DST_ONLY)
+	else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
 		inset &= ~I40E_INSET_SRC_PORT;
 
 	if (rss_types & I40E_HASH_L4_TYPES) {
 		uint64_t l3_mask = rss_types &
-				   (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+				   (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 		uint64_t l4_mask = rss_types &
-				   (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+				   (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 		if (l3_mask && !l4_mask)
 			inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
@@ -836,7 +836,7 @@ i40e_hash_config(struct i40e_pf *pf,
 
 	/* Update lookup table */
 	if (rss_info->queue_num > 0) {
-		uint8_t lut[ETH_RSS_RETA_SIZE_512];
+		uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
 		uint32_t i, j = 0;
 
 		for (i = 0; i < hw->func_caps.rss_table_size; i++) {
@@ -943,7 +943,7 @@ i40e_hash_parse_queues(const struct rte_eth_dev *dev,
 			    "RSS key is ignored when queues specified");
 
 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
 		max_queue = i40e_pf_calc_configured_queues_num(pf);
 	else
 		max_queue = pf->dev_data->nb_rx_queues;
@@ -1081,22 +1081,22 @@ i40e_hash_validate_rss_types(uint64_t rss_types)
 	uint64_t type, mask;
 
 	/* Validate L2 */
-	type = ETH_RSS_ETH & rss_types;
-	mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+	type = RTE_ETH_RSS_ETH & rss_types;
+	mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L3 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-	       ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
-	       ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
-	mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+	       RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+	       RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+	mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
 
 	/* Validate L4 */
-	type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
-	mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+	type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+	mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
 	if (!type && mask)
 		return false;
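
The SRC_ONLY/DST_ONLY handling in this file follows one rule: if both are set at a given layer, it behaves as if neither were set. A condensed restatement of the L3 case, not driver code, just the rule in isolation:

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Keep both address fields in the input set unless exactly one of
     * L3 SRC_ONLY/DST_ONLY is requested. */
    static uint64_t
    l3_inset(uint64_t rss_types, uint64_t inset,
    	 uint64_t src_field, uint64_t dst_field)
    {
    	uint64_t mask = rss_types &
    			(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);

    	if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
    		inset &= ~dst_field;
    	else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
    		inset &= ~src_field;
    	return inset;
    }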
 
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index e2d8b2b5f7f1..ccb3924a5f68 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1207,24 +1207,24 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 	event.event_data.link_event.link_status =
 		dev->data->dev_link.link_status;
 
-	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+	/* need to convert the RTE_ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
 	switch (dev->data->dev_link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
 		break;
-	case ETH_SPEED_NUM_20G:
+	case RTE_ETH_SPEED_NUM_20G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
 		break;
-	case ETH_SPEED_NUM_25G:
+	case RTE_ETH_SPEED_NUM_25G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
 		break;
-	case ETH_SPEED_NUM_40G:
+	case RTE_ETH_SPEED_NUM_40G:
 		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	default:
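
On the application side, the same renamed constants are matched when reading the link back. A minimal sketch; print_link() is hypothetical and assumes the int-returning rte_eth_link_get_nowait() of current ethdev:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: report the negotiated speed using the renamed constants. */
    static void
    print_link(uint16_t port_id)
    {
    	struct rte_eth_link link;

    	if (rte_eth_link_get_nowait(port_id, &link) != 0)
    		return;

    	switch (link.link_speed) {
    	case RTE_ETH_SPEED_NUM_10G:
    		printf("port %u: 10G\n", port_id);
    		break;
    	case RTE_ETH_SPEED_NUM_40G:
    		printf("port %u: 40G\n", port_id);
    		break;
    	default:
    		printf("port %u: %u Mbps\n", port_id, link.link_speed);
    		break;
    	}
    }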
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 554b1142c136..a13bb81115f4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1329,7 +1329,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	for (i = 0; i < tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		if (k) {
 			for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
 				for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
@@ -1995,7 +1995,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2243,7 +2243,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 	}
 	/* check simple tx conflict */
 	if (ad->tx_simple_allowed) {
-		if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+		if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
 				txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
 			PMD_DRV_LOG(ERR, "No-simple tx is required.");
 			return -EINVAL;
@@ -3417,7 +3417,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		 txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
 	ad->tx_vec_allowed = (ad->tx_simple_allowed &&
 			txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2301e6301d7d..5e6eecc50116 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -120,7 +120,7 @@ struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
-	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
@@ -166,7 +166,7 @@ struct i40e_tx_queue {
 	bool q_set; /**< indicate if tx queue has been configured */
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
-	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
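
The simple and vector Tx paths in this driver engage only when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE is the sole offload, so an application that wants them opts in per queue. A sketch; setup_txq_fast_free() is hypothetical, and fast free requires all Tx mbufs to come from one mempool with reference count 1:

    #include <rte_ethdev.h>

    /* Sketch: request only the fast-free Tx offload on one queue. */
    static int
    setup_txq_fast_free(uint16_t port_id, uint16_t qid, uint16_t nb_desc)
    {
    	struct rte_eth_txconf txconf = {
    		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
    	};

    	return rte_eth_tx_queue_setup(port_id, qid, nb_desc,
    				      rte_eth_dev_socket_id(port_id),
    				      &txconf);
    }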
 
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 4ffe030fcb64..7abc0821d119 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -900,7 +900,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index f52e3c567558..f9a7f4655050 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -100,7 +100,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	  */
 	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < n; i++) {
 			free[i] = txep[i].mbuf;
 			txep[i].mbuf = NULL;
@@ -211,7 +211,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 	struct i40e_rx_queue *rxq;
 	uint16_t desc, i;
 	bool first_queue;
@@ -221,11 +221,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	 /* no header split support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	/* no QinQ support */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 
 	/**
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 12d5a2e48a9b..663c46b91dc5 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -42,30 +42,30 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
 		sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_MULTI_SEGS  |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -385,19 +385,19 @@ i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
 		return -EINVAL;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* Enable or disable VLAN filtering offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			return i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_filter(vsi, FALSE);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping offload */
 		if (ethdev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_STRIP)
+		    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			return i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			return i40e_vsi_config_vlan_stripping(vsi, FALSE);
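
Runtime toggling from the application goes through the VLAN offload mask API with the renamed bits. A sketch; set_vlan_strip() is hypothetical, following the usual get/modify/set pattern:

    #include <stdbool.h>
    #include <rte_ethdev.h>

    /* Sketch: flip VLAN stripping at runtime. */
    static int
    set_vlan_strip(uint16_t port_id, bool on)
    {
    	int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

    	if (vlan_offload < 0)
    		return vlan_offload;

    	if (on)
    		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
    	else
    		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;

    	return rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
    }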
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 34bfa9af4734..12f541f53926 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -50,18 +50,18 @@
 	VIRTCHNL_VF_OFFLOAD_RX_POLLING)
 
 #define IAVF_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 |         \
-	ETH_RSS_NONFRAG_IPV4_TCP |  \
-	ETH_RSS_NONFRAG_IPV4_UDP |  \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 |         \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP |  \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 611f1f7722b0..df44df772e4e 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -266,53 +266,53 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	static const uint64_t map_hena_rss[] = {
 		/* IPv4 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
-				ETH_RSS_NONFRAG_IPV4_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
-				ETH_RSS_NONFRAG_IPV4_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
-				ETH_RSS_NONFRAG_IPV4_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
-				ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+				RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
 
 		/* IPv6 */
 		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
-				ETH_RSS_NONFRAG_IPV6_UDP,
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
-				ETH_RSS_NONFRAG_IPV6_TCP,
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
-				ETH_RSS_NONFRAG_IPV6_SCTP,
+				RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
 		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
-				ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+				RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
 
 		/* L2 Payload */
-		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
 	};
 
-	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
-				  ETH_RSS_NONFRAG_IPV4_TCP |
-				  ETH_RSS_NONFRAG_IPV4_SCTP |
-				  ETH_RSS_NONFRAG_IPV4_OTHER |
-				  ETH_RSS_FRAG_IPV4;
+	const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV4;
 
-	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_NONFRAG_IPV6_SCTP |
-				  ETH_RSS_NONFRAG_IPV6_OTHER |
-				  ETH_RSS_FRAG_IPV6;
+	const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+				  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+				  RTE_ETH_RSS_FRAG_IPV6;
 
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
@@ -331,13 +331,13 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	/**
-	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered
 	 * generalizations of all other IPv4 and IPv6 RSS types.
 	 */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		rss_hf |= ipv4_rss;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		rss_hf |= ipv6_rss;
 
 	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
@@ -363,10 +363,10 @@ iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
 	}
 
 	if (valid_rss_hf & ipv4_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
 
 	if (valid_rss_hf & ipv6_rss)
-		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
 
 	if (rss_hf & ~valid_rss_hf)
 		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
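To make the flow above explicit: the generic RTE_ETH_RSS_IPV4/IPV6 bits are widened to every specific type before programming, then folded back into valid_rss_hf so the warning only fires for genuinely unsupported bits. The widening step, condensed into a sketch:

#include <stdint.h>
#include <rte_ethdev.h>

/* Sketch of the IPv4 widening; the IPv6 case is symmetrical. */
static uint64_t
expand_ipv4_rss(uint64_t rss_hf)
{
	const uint64_t ipv4_all = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
				  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
				  RTE_ETH_RSS_FRAG_IPV4;

	if (rss_hf & RTE_ETH_RSS_IPV4)
		rss_hf |= ipv4_all;
	return rss_hf;
}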
@@ -467,7 +467,7 @@ iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
 		return 0;
 
 	enable = !!(dev->data->dev_conf.txmode.offloads &
-		    DEV_TX_OFFLOAD_VLAN_INSERT);
+		    RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	iavf_config_vlan_insert_v2(adapter, enable);
 
 	return 0;
@@ -479,10 +479,10 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev)
 	int err;
 
 	err = iavf_dev_vlan_offload_set(dev,
-					ETH_VLAN_STRIP_MASK |
-					ETH_QINQ_STRIP_MASK |
-					ETH_VLAN_FILTER_MASK |
-					ETH_VLAN_EXTEND_MASK);
+					RTE_ETH_VLAN_STRIP_MASK |
+					RTE_ETH_QINQ_STRIP_MASK |
+					RTE_ETH_VLAN_FILTER_MASK |
+					RTE_ETH_VLAN_EXTEND_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
 		return err;
@@ -512,8 +512,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_vec_allowed = true;
 	ad->tx_vec_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Large VF setting */
 	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
@@ -611,7 +611,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -961,34 +961,34 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
@@ -1048,42 +1048,42 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (vf->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
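The RTE_ETH_SPEED_NUM_* constants are defined as the link speed in Mbps (RTE_ETH_SPEED_NUM_10G is 10000), so the switch above effectively validates the Mbps value reported over virtchnl. The same mapping in table-driven form, as a sketch:

#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static uint32_t
vf_speed_to_ethdev(uint32_t mbps)
{
	static const uint32_t known[] = {
		RTE_ETH_SPEED_NUM_10M, RTE_ETH_SPEED_NUM_100M,
		RTE_ETH_SPEED_NUM_1G, RTE_ETH_SPEED_NUM_10G,
		RTE_ETH_SPEED_NUM_20G, RTE_ETH_SPEED_NUM_25G,
		RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_50G,
		RTE_ETH_SPEED_NUM_100G,
	};
	unsigned int i;

	for (i = 0; i < RTE_DIM(known); i++)
		if (known[i] == mbps)
			return mbps;	/* constant equals the Mbps value */
	return RTE_ETH_SPEED_NUM_NONE;
}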
@@ -1231,14 +1231,14 @@ iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
 	bool enable;
 	int err;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 
 		iavf_iterate_vlan_filters_v2(dev, enable);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		err = iavf_config_vlan_strip_v2(adapter, enable);
 		/* If not support, the stripping is already disabled by PF */
@@ -1267,9 +1267,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			err = iavf_enable_vlan_strip(adapter);
 		else
 			err = iavf_disable_vlan_strip(adapter);
@@ -1311,8 +1311,8 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(lut, vf->rss_lut, reta_size);
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -1348,8 +1348,8 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = vf->rss_lut[i];
 	}
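The idx/shift arithmetic above is the standard RETA layout: entry i lives in 64-entry group i / RTE_ETH_RETA_GROUP_SIZE at slot i % RTE_ETH_RETA_GROUP_SIZE, and is touched only when its bit is set in the group mask. A sketch of building a round-robin table with the same indexing (hypothetical helper, nb_rx_queues > 0 assumed):

#include <stdint.h>
#include <rte_ethdev.h>

static void
fill_reta_round_robin(struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size, uint16_t nb_rx_queues)
{
	uint16_t i, idx, shift;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
}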
@@ -1556,7 +1556,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
 		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
-					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
 					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
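The crc_stats_len logic above follows the RTE_ETH_RX_OFFLOAD_KEEP_CRC contract: with the offload enabled the 4-byte FCS stays on the packet and no correction is applied; otherwise the driver deducts RTE_ETHER_CRC_LEN per received packet from the hardware byte counters. Condensed into a sketch:

#include <stdint.h>
#include <rte_ether.h>
#include <rte_ethdev.h>

static uint64_t
rx_bytes_adjusted(uint64_t hw_bytes, uint64_t pkts, uint64_t rx_offloads)
{
	uint8_t crc_len = (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
			  0 : RTE_ETHER_CRC_LEN;

	return hw_bytes - (uint64_t)crc_len * pkts;
}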
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 01724cd569dd..55d8a11da388 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -395,90 +395,90 @@ struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tcp_tmplt = {
 /* rss type super set */
 
 /* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define IAVF_RSS_TYPE_OUTER_IPV4	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6	(ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define IAVF_RSS_TYPE_OUTER_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 /* VLAN IPV4 */
 #define IAVF_RSS_TYPE_VLAN_IPV4		(IAVF_RSS_TYPE_OUTER_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV4_SCTP	(IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define IAVF_RSS_TYPE_VLAN_IPV6		(IAVF_RSS_TYPE_OUTER_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_SCTP	(IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4	ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4	RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP	(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6	ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6	RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP	(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 /* GTPU IPv4 */
 #define IAVF_RSS_TYPE_GTPU_IPV4		(IAVF_RSS_TYPE_INNER_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_UDP	(IAVF_RSS_TYPE_INNER_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV4_TCP	(IAVF_RSS_TYPE_INNER_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define IAVF_RSS_TYPE_GTPU_IPV6		(IAVF_RSS_TYPE_INNER_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_UDP	(IAVF_RSS_TYPE_INNER_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define IAVF_RSS_TYPE_GTPU_IPV6_TCP	(IAVF_RSS_TYPE_INNER_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /**
  * Supported pattern for hash.
@@ -496,7 +496,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv4_udp,		IAVF_RSS_TYPE_VLAN_IPV4_UDP,	&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_tcp,		IAVF_RSS_TYPE_VLAN_IPV4_TCP,	&outer_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv4_sctp,		IAVF_RSS_TYPE_VLAN_IPV4_SCTP,	&outer_ipv4_sctp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpu,			ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gtpu,			RTE_ETH_RSS_IPV4,			&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&inner_ipv4_tcp_tmplt},
@@ -538,9 +538,9 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv4_ah,			IAVF_RSS_TYPE_IPV4_AH,		&ipv4_ah_tmplt},
 	{iavf_pattern_eth_ipv4_l2tpv3,			IAVF_RSS_TYPE_IPV4_L2TPV3,	&ipv4_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv4_pfcp,			IAVF_RSS_TYPE_IPV4_PFCP,	&ipv4_pfcp_tmplt},
-	{iavf_pattern_eth_ipv4_gtpc,			ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
-	{iavf_pattern_eth_ecpri,			ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
-	{iavf_pattern_eth_ipv4_ecpri,			ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_gtpc,			RTE_ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ecpri,			RTE_ETH_RSS_ECPRI,			&eth_ecpri_tmplt},
+	{iavf_pattern_eth_ipv4_ecpri,			RTE_ETH_RSS_ECPRI,			&ipv4_ecpri_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
@@ -565,7 +565,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_vlan_ipv6_udp,		IAVF_RSS_TYPE_VLAN_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_tcp,		IAVF_RSS_TYPE_VLAN_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_sctp,		IAVF_RSS_TYPE_VLAN_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpu,			ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gtpu,			RTE_ETH_RSS_IPV6,			&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&inner_ipv6_tcp_tmplt},
@@ -607,7 +607,7 @@ static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_ah,			IAVF_RSS_TYPE_IPV6_AH,		&ipv6_ah_tmplt},
 	{iavf_pattern_eth_ipv6_l2tpv3,			IAVF_RSS_TYPE_IPV6_L2TPV3,	&ipv6_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv6_pfcp,			IAVF_RSS_TYPE_IPV6_PFCP,	&ipv6_pfcp_tmplt},
-	{iavf_pattern_eth_ipv6_gtpc,			ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ipv6_gtpc,			RTE_ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv6_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
@@ -648,52 +648,52 @@ iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 	struct virtchnl_rss_cfg rss_cfg;
 
 #define IAVF_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		rss_cfg.proto_hdrs = inner_ipv4_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		rss_cfg.proto_hdrs = inner_ipv6_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
 		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
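Each bit handled above becomes one virtchnl RSS configuration message toward the PF. From the application side the same bits arrive through the standard hash-update call; a sketch:

#include <stddef.h>
#include <rte_ethdev.h>

static int
request_udp4_tcp4_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf conf = {
		.rss_key = NULL,	/* keep the current key */
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			  RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}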
@@ -855,28 +855,28 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 		hdr = &proto_hdrs->proto_hdr[i];
 		switch (hdr->type) {
 		case VIRTCHNL_PROTO_HDR_ETH:
-			if (!(rss_type & ETH_RSS_ETH))
+			if (!(rss_type & RTE_ETH_RSS_ETH))
 				hdr->field_selector = 0;
-			else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_DST);
-			else if (rss_type & ETH_RSS_L2_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 				REFINE_PROTO_FLD(DEL, ETH_SRC);
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4) {
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 					iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
-				} else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				}
@@ -884,39 +884,39 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
 			if (rss_type &
-			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			     ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_FRAG_IPV4)
+			    (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			     RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
 					REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_IPV4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
 
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6:
 			if (rss_type &
-			    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			     ETH_RSS_NONFRAG_IPV6_UDP |
-			     ETH_RSS_NONFRAG_IPV6_TCP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			    (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
-				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+				} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				} else if (rss_type &
-					   (ETH_RSS_L4_SRC_ONLY |
-					    ETH_RSS_L4_DST_ONLY)) {
+					   (RTE_ETH_RSS_L4_SRC_ONLY |
+					    RTE_ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				}
@@ -933,7 +933,7 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			}
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
 			else
 				hdr->field_selector = 0;
@@ -941,87 +941,87 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_UDP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_UDP |
-			     ETH_RSS_NONFRAG_IPV6_UDP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, UDP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_TCP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_TCP |
-			     ETH_RSS_NONFRAG_IPV6_TCP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, TCP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_SCTP:
 			if (rss_type &
-			    (ETH_RSS_NONFRAG_IPV4_SCTP |
-			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
-				if (rss_type & ETH_RSS_L4_SRC_ONLY)
+			    (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			     RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+				if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
-				else if (rss_type & ETH_RSS_L4_DST_ONLY)
+				else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 					REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
 				else if (rss_type &
-					 (ETH_RSS_L3_SRC_ONLY |
-					  ETH_RSS_L3_DST_ONLY))
+					 (RTE_ETH_RSS_L3_SRC_ONLY |
+					  RTE_ETH_RSS_L3_DST_ONLY))
 					hdr->field_selector = 0;
 			} else {
 				hdr->field_selector = 0;
 			}
 
-			if (rss_type & ETH_RSS_L4_CHKSUM)
+			if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 				REFINE_PROTO_FLD(ADD, SCTP_CHKSUM);
 			break;
 		case VIRTCHNL_PROTO_HDR_S_VLAN:
-			if (!(rss_type & ETH_RSS_S_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_S_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_C_VLAN:
-			if (!(rss_type & ETH_RSS_C_VLAN))
+			if (!(rss_type & RTE_ETH_RSS_C_VLAN))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_L2TPV3:
-			if (!(rss_type & ETH_RSS_L2TPV3))
+			if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ESP:
-			if (!(rss_type & ETH_RSS_ESP))
+			if (!(rss_type & RTE_ETH_RSS_ESP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_AH:
-			if (!(rss_type & ETH_RSS_AH))
+			if (!(rss_type & RTE_ETH_RSS_AH))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_PFCP:
-			if (!(rss_type & ETH_RSS_PFCP))
+			if (!(rss_type & RTE_ETH_RSS_PFCP))
 				hdr->field_selector = 0;
 			break;
 		case VIRTCHNL_PROTO_HDR_ECPRI:
-			if (!(rss_type & ETH_RSS_ECPRI))
+			if (!(rss_type & RTE_ETH_RSS_ECPRI))
 				hdr->field_selector = 0;
 			break;
 		default:
@@ -1038,7 +1038,7 @@ iavf_refine_proto_hdrs_gtpu(struct virtchnl_proto_hdrs *proto_hdrs,
 	struct virtchnl_proto_hdr *hdr;
 	int i;
 
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	for (i = 0; i < proto_hdrs->count; i++) {
@@ -1163,10 +1163,10 @@ static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
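One plausible way the invalid_rss_comb table is consumed (the driver's exact predicate may differ) is to reject any request that fully contains a known-bad combination:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
rss_comb_invalid(uint64_t rss_hf, const uint64_t *invalid, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if ((rss_hf & invalid[i]) == invalid[i])
			return true;
	return false;
}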
@@ -1177,27 +1177,27 @@ struct rss_attr_type {
 	uint64_t type;
 };
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE64)
 
 #define INVALID_RSS_ATTR	(RTE_ETH_RSS_L3_PRE32	| \
@@ -1207,9 +1207,9 @@ struct rss_attr_type {
 				 RTE_ETH_RSS_L3_PRE96)
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* currently, IPv6 prefix only supports 64 bits */
 	{RTE_ETH_RSS_L3_PRE64,				VALID_RSS_IPV6},
 	{INVALID_RSS_ATTR,				0}
@@ -1226,15 +1226,15 @@ iavf_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
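The symmetric-Toeplitz guard above exists because the hash is computed over (src, dst) pairs; dropping one side through the *_SRC_ONLY/*_DST_ONLY attributes would make the result asymmetric again. The two conditions condensed into a predicate, as a sketch:

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

static bool
symmetric_hash_request_ok(uint64_t rss_type)
{
	const uint64_t one_sided = RTE_ETH_RSS_L3_SRC_ONLY |
				   RTE_ETH_RSS_L3_DST_ONLY |
				   RTE_ETH_RSS_L4_SRC_ONLY |
				   RTE_ETH_RSS_L4_DST_ONLY;
	const uint64_t hashable = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV6_UDP |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
				  RTE_ETH_RSS_NONFRAG_IPV6_SCTP;

	return !(rss_type & one_sided) && (rss_type & hashable);
}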
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88bbd40c1027..ac4db117f5cd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -617,7 +617,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->vsi = vsi;
 	rxq->offloads = offloads;
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f4ae2fd6e123..2d7f6b1b2dca 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -24,22 +24,22 @@
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_QINQ_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		 \
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		 \
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		DEV_RX_OFFLOAD_CHECKSUM |		 \
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_RX_OFFLOAD_VLAN |		 \
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
+		RTE_ETH_RX_OFFLOAD_VLAN |		 \
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
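These masks drive Tx path selection: any offload in IAVF_TX_NO_VECTOR_FLAGS forces the scalar path, offloads within IAVF_TX_VECTOR_OFFLOAD select the offload-capable vector path, and a queue with neither takes the plain vector path. A simplified sketch of that decision (not the driver's actual function):

#include <stdint.h>

/* Returns -1 for scalar, 1 for IAVF_VECTOR_OFFLOAD_PATH, 0 for
 * IAVF_VECTOR_PATH; the mask arguments stand in for the macros above. */
static int
pick_tx_path(uint64_t txq_offloads, uint64_t no_vector, uint64_t vec_offload)
{
	if (txq_offloads & no_vector)
		return -1;
	if (txq_offloads & vec_offload)
		return 1;
	return 0;
}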
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 72a4fcab04a5..b47c51b8ebe4 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -906,7 +906,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 		    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
@@ -958,7 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					(_mm256_castsi128_si256(raw_desc_bh0),
 					raw_desc_bh1, 1);
 
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 12375d3d80bd..b8f2f69f12fc 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1141,7 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+			if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1193,7 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 						(_mm256_castsi128_si256(raw_desc_bh0),
 						 raw_desc_bh1, 1);
 
-				if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+				if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 					/**
 					 * to shift the 32b RSS hash value to the
 					 * highest 32b of each 128b before mask
@@ -1721,7 +1721,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
 								rte_lcore_id());
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index edb54991e298..1de43b9b8ee2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -819,7 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index c9c01a14e349..7b7df5eebb6d 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -835,7 +835,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
 		/* set all lut items to default queue */
 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index ebd8ca57ef5f..1cda2db00e56 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -95,7 +95,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
-	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -582,7 +582,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -644,7 +644,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 	hw->tm_conf.committed = false;
 
@@ -660,8 +660,8 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	return 0;
 }
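The two added lines encode an ethdev-wide convention: an RSS multi-queue Rx mode implies delivering the computed hash in the mbuf, so the PMD enables RTE_ETH_RX_OFFLOAD_RSS_HASH on the application's behalf. As a standalone sketch:

#include <rte_ethdev.h>

static void
apply_rss_hash_rule(struct rte_eth_conf *conf)
{
	if (conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
}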
@@ -683,27 +683,27 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -933,42 +933,42 @@ ice_dcf_link_update(struct rte_eth_dev *dev,
 	 */
 	switch (hw->link_speed) {
 	case 10:
-		new_link.link_speed = ETH_SPEED_NUM_10M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case 100:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case 1000:
-		new_link.link_speed = ETH_SPEED_NUM_1G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case 10000:
-		new_link.link_speed = ETH_SPEED_NUM_10G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case 20000:
-		new_link.link_speed = ETH_SPEED_NUM_20G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case 25000:
-		new_link.link_speed = ETH_SPEED_NUM_25G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case 40000:
-		new_link.link_speed = ETH_SPEED_NUM_40G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case 50000:
-		new_link.link_speed = ETH_SPEED_NUM_50G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case 100000:
-		new_link.link_speed = ETH_SPEED_NUM_100G;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
-	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = hw->link_up ? ETH_LINK_UP :
-					     ETH_LINK_DOWN;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+					     RTE_ETH_LINK_DOWN;
 	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
@@ -987,11 +987,11 @@ ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
 					udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
 					udp_tunnel->udp_port);
 		break;
@@ -1018,8 +1018,8 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-	case RTE_TUNNEL_TYPE_ECPRI:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_ECPRI:
 		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
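For reference, the application-side counterpart of the tunnel-type switch above, using the renamed enumerators (hypothetical helper):

#include <rte_ethdev.h>

static int
add_vxlan_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = udp_port,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}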
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 44fb38dbe7b1..b9fcfc80ad9b 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -37,7 +37,7 @@ ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -45,7 +45,7 @@ ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
 static int
 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -143,28 +143,28 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_RSS_HASH;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -246,9 +246,9 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		return -ENOTSUP;
 
 	/* Vlan stripping setting */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		bool enable = !!(dev_conf->rxmode.offloads &
-				 DEV_RX_OFFLOAD_VLAN_STRIP);
+				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (enable && repr->outer_vlan_info.port_vlan_ena) {
 			PMD_DRV_LOG(ERR,
@@ -345,7 +345,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 	if (!ice_dcf_vlan_offload_ena(repr))
 		return -ENOTSUP;
 
-	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
 		PMD_DRV_LOG(ERR,
 			    "Can accelerate only outer VLAN in QinQ\n");
 		return -EINVAL;
@@ -375,7 +375,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
 
 	if (repr->outer_vlan_info.stripping_ena) {
 		err = ice_dcf_vf_repr_vlan_offload_set(dev,
-						       ETH_VLAN_STRIP_MASK);
+						       RTE_ETH_VLAN_STRIP_MASK);
 		if (err) {
 			PMD_DRV_LOG(ERR,
 				    "Failed to reset VLAN stripping : %d\n",
@@ -449,7 +449,7 @@ ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
 	int err;
 
 	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
-					       ETH_VLAN_STRIP_MASK);
+					       RTE_ETH_VLAN_STRIP_MASK);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
 		return err;
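The representor accepts only RTE_ETH_VLAN_TYPE_OUTER, matching the standard TPID-setting entry point. A sketch of the call it services:

#include <rte_ethdev.h>

static int
set_outer_tpid(uint16_t port_id, uint16_t tpid)
{
	return rte_eth_dev_set_vlan_ether_type(port_id,
					       RTE_ETH_VLAN_TYPE_OUTER, tpid);
}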
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index edbc74632711..6a6637a15af7 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1487,9 +1487,9 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	TAILQ_INIT(&vsi->mac_list);
 	TAILQ_INIT(&vsi->vlan_list);
 
-	/* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+	/* Keep in sync with the RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
-			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
 			hw->func_caps.common_cap.rss_table_size;
 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
@@ -2993,14 +2993,14 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	int ret;
 
 #define ICE_RSS_HF_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
 	if (ret)
@@ -3010,7 +3010,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	cfg.symm = 0;
 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 	/* Configure RSS for IPv4 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3020,7 +3020,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for IPv6 with src/dst addr as input set */
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
@@ -3030,7 +3030,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3041,7 +3041,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for udp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3052,7 +3052,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3063,7 +3063,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3074,7 +3074,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
@@ -3085,7 +3085,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 	}
 
 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
@@ -3095,7 +3095,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
@@ -3105,7 +3105,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
 				ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
@@ -3115,7 +3115,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
@@ -3125,7 +3125,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
@@ -3135,7 +3135,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
@@ -3145,7 +3145,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 				    __func__, ret);
 	}
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
@@ -3288,8 +3288,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
 	ad->rx_bulk_alloc_allowed = true;
 	ad->tx_simple_allowed = true;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_rx_queues) {
 		ret = ice_init_rss(pf);
@@ -3569,8 +3569,8 @@ ice_dev_start(struct rte_eth_dev *dev)
 	ice_set_rx_function(dev);
 	ice_set_tx_function(dev);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+			RTE_ETH_VLAN_EXTEND_MASK;
 	ret = ice_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
@@ -3682,40 +3682,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_KEEP_CRC |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->flow_type_rss_offloads = 0;
 
 	if (!is_safe_mode) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM |
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_QINQ_STRIP |
-			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH |
-			DEV_RX_OFFLOAD_TIMESTAMP;
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH |
+			RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_QINQ_INSERT |
-			DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM |
-			DEV_TX_OFFLOAD_SCTP_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
 	dev_info->rx_queue_offload_capa = 0;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
@@ -3754,24 +3754,24 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_align = ICE_ALIGN_RING_DESC,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			       ETH_LINK_SPEED_100M |
-			       ETH_LINK_SPEED_1G |
-			       ETH_LINK_SPEED_2_5G |
-			       ETH_LINK_SPEED_5G |
-			       ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_20G |
-			       ETH_LINK_SPEED_25G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			       RTE_ETH_LINK_SPEED_100M |
+			       RTE_ETH_LINK_SPEED_1G |
+			       RTE_ETH_LINK_SPEED_2_5G |
+			       RTE_ETH_LINK_SPEED_5G |
+			       RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_20G |
+			       RTE_ETH_LINK_SPEED_25G;
 
 	phy_type_low = hw->port_info->phy.phy_type_low;
 	phy_type_high = hw->port_info->phy.phy_type_high;
 
 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 
 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3836,8 +3836,8 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
 					      &link_status, NULL);
 		if (status != ICE_SUCCESS) {
-			link.link_speed = ETH_SPEED_NUM_100M;
-			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_speed = RTE_ETH_SPEED_NUM_100M;
+			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 			PMD_DRV_LOG(ERR, "Failed to get link info");
 			goto out;
 		}
@@ -3853,55 +3853,55 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		goto out;
 
 	/* Full-duplex operation at all supported speeds */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	/* Parse the link status */
 	switch (link_status.link_speed) {
 	case ICE_AQ_LINK_SPEED_10MB:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case ICE_AQ_LINK_SPEED_100MB:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case ICE_AQ_LINK_SPEED_1000MB:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case ICE_AQ_LINK_SPEED_2500MB:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_5GB:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case ICE_AQ_LINK_SPEED_10GB:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case ICE_AQ_LINK_SPEED_20GB:
-		link.link_speed = ETH_SPEED_NUM_20G;
+		link.link_speed = RTE_ETH_SPEED_NUM_20G;
 		break;
 	case ICE_AQ_LINK_SPEED_25GB:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	case ICE_AQ_LINK_SPEED_40GB:
-		link.link_speed = ETH_SPEED_NUM_40G;
+		link.link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 	case ICE_AQ_LINK_SPEED_50GB:
-		link.link_speed = ETH_SPEED_NUM_50G;
+		link.link_speed = RTE_ETH_SPEED_NUM_50G;
 		break;
 	case ICE_AQ_LINK_SPEED_100GB:
-		link.link_speed = ETH_SPEED_NUM_100G;
+		link.link_speed = RTE_ETH_SPEED_NUM_100G;
 		break;
 	case ICE_AQ_LINK_SPEED_UNKNOWN:
 		PMD_DRV_LOG(ERR, "Unknown link speed");
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "No link speed");
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 	}
 
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			      ETH_LINK_SPEED_FIXED);
+			      RTE_ETH_LINK_SPEED_FIXED);
 
 out:
 	ice_atomic_write_link_status(dev, &link);
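
For reference, the application-side counterpart of this hunk uses the same
renamed link constants. A minimal sketch of a fragment inside an application
function, assuming 'port_id' refers to a started port:

    struct rte_eth_link link;

    /* non-blocking query; the driver fills in the values set above */
    if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
        link.link_status == RTE_ETH_LINK_UP)
        printf("port %u: %u Mbps, %s-duplex\n", port_id,
               link.link_speed,
               link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
               "full" : "half");
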
@@ -4377,15 +4377,15 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ice_vsi_config_vlan_filter(vsi, true);
 		else
 			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			ice_vsi_config_vlan_stripping(vsi, true);
 		else
 			ice_vsi_config_vlan_stripping(vsi, false);
@@ -4500,8 +4500,8 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			lut[i] = reta_conf[idx].reta[shift];
 	}
@@ -4550,8 +4550,8 @@ ice_rss_reta_query(struct rte_eth_dev *dev,
 		goto out;
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] = lut[i];
 	}
@@ -5460,7 +5460,7 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
 		break;
 	default:
@@ -5484,7 +5484,7 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
 		break;
 	default:
@@ -5505,7 +5505,7 @@ ice_timesync_enable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_TIMESTAMP)) {
+	    RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
 		return -1;
 	}
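
The RETA hunks above keep the driver's idx/shift indexing unchanged; only the
group-size macro is renamed. A minimal application-side sketch of the same
indexing, assuming the port's reta_size does not exceed 512 entries and
'nb_queues' Rx queues are configured:

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    reta_spread(uint16_t port_id, uint16_t nb_queues)
    {
        struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
                                                  RTE_ETH_RETA_GROUP_SIZE];
        struct rte_eth_dev_info dev_info;
        uint16_t i, idx, shift;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
            idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* which 64-entry group */
            shift = i % RTE_ETH_RETA_GROUP_SIZE; /* position in the group */
            reta_conf[idx].mask |= 1ULL << shift;
            reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           dev_info.reta_size);
    }
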
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1cd3753ccc5f..599e0028f7e8 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -117,19 +117,19 @@
 		       ICE_FLAG_VF_MAC_BY_PF)
 
 #define ICE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /**
  * The overhead from MTU to max frame size.
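
Usage note: applications combine these renamed RTE_ETH_RSS_* bits the same
way. A minimal sketch of a fragment, assuming 'port_id' is valid, which masks
the request with the advertised capabilities before applying it:

    struct rte_eth_dev_info dev_info;
    struct rte_eth_rss_conf rss_conf = {
        .rss_key = NULL,    /* keep the current hash key */
        .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
    };

    rte_eth_dev_info_get(port_id, &dev_info);
    rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
    rte_eth_dev_rss_hash_update(port_id, &rss_conf);
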
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 20a3204fab7e..35eff8b17d28 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -39,27 +39,27 @@
 #define ICE_IPV4_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
 #define ICE_IPV6_PROT		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
 
-#define VALID_RSS_IPV4_L4	(ETH_RSS_NONFRAG_IPV4_UDP	| \
-				 ETH_RSS_NONFRAG_IPV4_TCP	| \
-				 ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4	(RTE_ETH_RSS_NONFRAG_IPV4_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
-#define VALID_RSS_IPV6_L4	(ETH_RSS_NONFRAG_IPV6_UDP	| \
-				 ETH_RSS_NONFRAG_IPV6_TCP	| \
-				 ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4	(RTE_ETH_RSS_NONFRAG_IPV6_UDP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	| \
+				 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4		(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
 				 VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6		(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
 				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
-#define VALID_RSS_ATTR		(ETH_RSS_L3_SRC_ONLY	| \
-				 ETH_RSS_L3_DST_ONLY	| \
-				 ETH_RSS_L4_SRC_ONLY	| \
-				 ETH_RSS_L4_DST_ONLY	| \
-				 ETH_RSS_L2_SRC_ONLY	| \
-				 ETH_RSS_L2_DST_ONLY	| \
+#define VALID_RSS_ATTR		(RTE_ETH_RSS_L3_SRC_ONLY	| \
+				 RTE_ETH_RSS_L3_DST_ONLY	| \
+				 RTE_ETH_RSS_L4_SRC_ONLY	| \
+				 RTE_ETH_RSS_L4_DST_ONLY	| \
+				 RTE_ETH_RSS_L2_SRC_ONLY	| \
+				 RTE_ETH_RSS_L2_DST_ONLY	| \
 				 RTE_ETH_RSS_L3_PRE32	| \
 				 RTE_ETH_RSS_L3_PRE48	| \
 				 RTE_ETH_RSS_L3_PRE64)
@@ -373,87 +373,87 @@ struct ice_rss_hash_cfg eth_tmplt = {
 };
 
 /* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4		(ETH_RSS_ETH | ETH_RSS_IPV4 | \
-					 ETH_RSS_FRAG_IPV4 | \
-					 ETH_RSS_IPV4_CHKSUM)
+#define ICE_RSS_TYPE_ETH_IPV4		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_FRAG_IPV4 | \
+					 RTE_ETH_RSS_IPV4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_UDP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_TCP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV4_SCTP	(ICE_RSS_TYPE_ETH_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV4		ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP		(ETH_RSS_IPV4 | \
-					 ETH_RSS_NONFRAG_IPV4_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV4		RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP		(RTE_ETH_RSS_IPV4 | \
+					 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 /* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6		(ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(ETH_RSS_ETH | ETH_RSS_IPV6 | \
-					 ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG	(RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_ETH_IPV6_UDP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_TCP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP | \
-					 ETH_RSS_L4_CHKSUM)
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
 #define ICE_RSS_TYPE_ETH_IPV6_SCTP	(ICE_RSS_TYPE_ETH_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP | \
-					 ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV6		ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP		(ETH_RSS_IPV6 | \
-					 ETH_RSS_NONFRAG_IPV6_SCTP)
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV6		RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP		(RTE_ETH_RSS_IPV6 | \
+					 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 /* VLAN IPV4 */
 #define ICE_RSS_TYPE_VLAN_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV4)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV4)
 #define ICE_RSS_TYPE_VLAN_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV4_SCTP	(ICE_RSS_TYPE_IPV4_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 /* VLAN IPv6 */
 #define ICE_RSS_TYPE_VLAN_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_FRAG	(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
-					 ETH_RSS_FRAG_IPV6)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+					 RTE_ETH_RSS_FRAG_IPV6)
 #define ICE_RSS_TYPE_VLAN_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 #define ICE_RSS_TYPE_VLAN_IPV6_SCTP	(ICE_RSS_TYPE_IPV6_SCTP | \
-					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+					 RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
 
 /* GTPU IPv4 */
 #define ICE_RSS_TYPE_GTPU_IPV4		(ICE_RSS_TYPE_IPV4 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_UDP	(ICE_RSS_TYPE_IPV4_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV4_TCP	(ICE_RSS_TYPE_IPV4_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 /* GTPU IPv6 */
 #define ICE_RSS_TYPE_GTPU_IPV6		(ICE_RSS_TYPE_IPV6 | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_UDP	(ICE_RSS_TYPE_IPV6_UDP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 #define ICE_RSS_TYPE_GTPU_IPV6_TCP	(ICE_RSS_TYPE_IPV6_TCP | \
-					 ETH_RSS_GTPU)
+					 RTE_ETH_RSS_GTPU)
 
 /* PPPOE */
-#define ICE_RSS_TYPE_PPPOE		(ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE		(RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
 
 /* PPPOE IPv4 */
 #define ICE_RSS_TYPE_PPPOE_IPV4		(ICE_RSS_TYPE_IPV4 | \
@@ -472,17 +472,17 @@ struct ice_rss_hash_cfg eth_tmplt = {
 					 ICE_RSS_TYPE_PPPOE)
 
 /* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP		(ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP		(ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH		(ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH		(ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3	(ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP		(ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP		(RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH		(RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3	(RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP		(RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
 
 /* MAC */
-#define ICE_RSS_TYPE_ETH		ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH		RTE_ETH_RSS_ETH
 
 /**
  * Supported pattern for hash.
@@ -647,86 +647,86 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
-		if (!(rss_type & ETH_RSS_ETH))
+		if (!(rss_type & RTE_ETH_RSS_ETH))
 			*hash_flds &= ~ICE_FLOW_HASH_ETH;
-		if (rss_type & ETH_RSS_L2_SRC_ONLY)
+		if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
-		else if (rss_type & ETH_RSS_L2_DST_ONLY)
+		else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
 			*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
 		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
-		if (rss_type & ETH_RSS_ETH)
+		if (rss_type & RTE_ETH_RSS_ETH)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
-		if (rss_type & ETH_RSS_C_VLAN)
+		if (rss_type & RTE_ETH_RSS_C_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
-		else if (rss_type & ETH_RSS_S_VLAN)
+		else if (rss_type & RTE_ETH_RSS_S_VLAN)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
-		if (!(rss_type & ETH_RSS_PPPOE))
+		if (!(rss_type & RTE_ETH_RSS_PPPOE))
 			*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 		if (rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		    ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV4) {
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
 				*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
 				*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
 			}
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV4;
 		}
 
-		if (rss_type & ETH_RSS_IPV4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 		if (rss_type &
-		   (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_FRAG_IPV6)
+		   (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
 				*hash_flds |=
 					BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
-			if (rss_type & ETH_RSS_L3_SRC_ONLY)
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
-			else if (rss_type & ETH_RSS_L3_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 			else if (rss_type &
-				(ETH_RSS_L4_SRC_ONLY |
-				ETH_RSS_L4_DST_ONLY))
+				(RTE_ETH_RSS_L4_SRC_ONLY |
+				RTE_ETH_RSS_L4_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_IPV6;
 		}
 
 		if (rss_type & RTE_ETH_RSS_L3_PRE32) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
 			} else {
@@ -735,10 +735,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE48) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
 			} else {
@@ -747,10 +747,10 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			}
 		}
 		if (rss_type & RTE_ETH_RSS_L3_PRE64) {
-			if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+			if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
-			} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+			} else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
 				*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
 			} else {
@@ -762,81 +762,81 @@ ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_UDP |
-		    ETH_RSS_NONFRAG_IPV6_UDP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_TCP |
-		    ETH_RSS_NONFRAG_IPV6_TCP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
 		if (rss_type &
-		   (ETH_RSS_NONFRAG_IPV4_SCTP |
-		    ETH_RSS_NONFRAG_IPV6_SCTP)) {
-			if (rss_type & ETH_RSS_L4_SRC_ONLY)
+		   (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+		    RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+			if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
-			else if (rss_type & ETH_RSS_L4_DST_ONLY)
+			else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
 			else if (rss_type &
-				(ETH_RSS_L3_SRC_ONLY |
-				  ETH_RSS_L3_DST_ONLY))
+				(RTE_ETH_RSS_L3_SRC_ONLY |
+				  RTE_ETH_RSS_L3_DST_ONLY))
 				*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		} else {
 			*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
 		}
 
-		if (rss_type & ETH_RSS_L4_CHKSUM)
+		if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM);
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
-		if (!(rss_type & ETH_RSS_L2TPV3))
+		if (!(rss_type & RTE_ETH_RSS_L2TPV3))
 			*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
-		if (!(rss_type & ETH_RSS_ESP))
+		if (!(rss_type & RTE_ETH_RSS_ESP))
 			*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
-		if (!(rss_type & ETH_RSS_AH))
+		if (!(rss_type & RTE_ETH_RSS_AH))
 			*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
 	}
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
-		if (!(rss_type & ETH_RSS_PFCP))
+		if (!(rss_type & RTE_ETH_RSS_PFCP))
 			*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
 	}
 }
@@ -870,7 +870,7 @@ ice_refine_hash_cfg_gtpu(struct ice_rss_hash_cfg *hash_cfg,
 	uint64_t *hash_flds = &hash_cfg->hash_flds;
 
 	/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
-	if (!(rss_type & ETH_RSS_GTPU))
+	if (!(rss_type & RTE_ETH_RSS_GTPU))
 		return;
 
 	if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
@@ -892,10 +892,10 @@ static void ice_refine_hash_cfg(struct ice_rss_hash_cfg *hash_cfg,
 }
 
 static uint64_t invalid_rss_comb[] = {
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
-	ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
-	ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	RTE_ETH_RSS_L3_PRE40 |
 	RTE_ETH_RSS_L3_PRE56 |
 	RTE_ETH_RSS_L3_PRE96
@@ -907,9 +907,9 @@ struct rss_attr_type {
 };
 
 static struct rss_attr_type rss_attr_to_valid_type[] = {
-	{ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY,	ETH_RSS_ETH},
-	{ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
-	{ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
+	{RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY,	RTE_ETH_RSS_ETH},
+	{RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY,	VALID_RSS_L3},
+	{RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY,	VALID_RSS_L4},
 	/* currently only IPv6 prefixes up to 64 bits are supported */
 	{RTE_ETH_RSS_L3_PRE32,				VALID_RSS_IPV6},
 	{RTE_ETH_RSS_L3_PRE48,				VALID_RSS_IPV6},
@@ -928,16 +928,16 @@ ice_any_invalid_rss_type(enum rte_eth_hash_function rss_func,
 	 * hash function.
 	 */
 	if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
-		if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
-		    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+		if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+		    RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
 			return true;
 
 		if (!(rss_type &
-		   (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
-		    ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
-		    ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
-		    ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
-		    ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+		   (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+		    RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+		    RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+		    RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
 			return true;
 	}
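
The invalid_rss_comb[] change above keeps the driver's rule that a bare L3
type cannot be combined with a specific L4 type. An application-side pre-check
mirroring it, as a sketch (rss_types_valid() is a hypothetical helper, not an
ethdev API):

    #include <stdbool.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    static const uint64_t bad_combos[] = {
        RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
    };

    static bool
    rss_types_valid(uint64_t types)
    {
        unsigned int i;

        for (i = 0; i < RTE_DIM(bad_combos); i++)
            if ((types & bad_combos[i]) == bad_combos[i])
                return false;  /* would be rejected by the ice PMD */
        return true;
    }
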
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9f5..8406240d7209 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -303,7 +303,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 		}
 	}
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		/* Register mbuf field and flag for Rx timestamp */
 		err = rte_mbuf_dyn_rx_timestamp_register(
 				&ice_timestamp_dynfield_offset,
@@ -367,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -1117,7 +1117,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -1624,7 +1624,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-			if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 				ts_ns = ice_tstamp_convert_32b_64b(hw,
 					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
 				if (ice_timestamp_dynflag > 0) {
@@ -1942,7 +1942,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2373,7 +2373,7 @@ ice_recv_pkts(void *rx_queue,
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 			ts_ns = ice_tstamp_convert_32b_64b(hw,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 			if (ice_timestamp_dynflag > 0) {
@@ -2889,7 +2889,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
 	for (i = 0; i < txq->tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
 			txep->mbuf = NULL;
@@ -3365,7 +3365,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 	/* Use a simple Tx queue if possible (only fast free is allowed) */
 	ad->tx_simple_allowed =
 		(txq->offloads ==
-		(txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
 
 	if (ad->tx_simple_allowed)
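
The RTE_ETH_RX_OFFLOAD_TIMESTAMP hunks pair with the mbuf dynamic field
registered above; the application side looks roughly like the following
sketch, with 'ts_off'/'ts_flag' as file-scope state:

    #include <rte_mbuf_dyn.h>

    static int ts_off;
    static uint64_t ts_flag;

    /* once at init, mirroring the registration in ice_program_hw_rx_queue() */
    static int
    rx_timestamp_init(void)
    {
        return rte_mbuf_dyn_rx_timestamp_register(&ts_off, &ts_flag);
    }

    /* per packet, after rte_eth_rx_burst() */
    static uint64_t
    rx_timestamp_get(struct rte_mbuf *mb)
    {
        if (!(mb->ol_flags & ts_flag))
            return 0;
        return *RTE_MBUF_DYNFIELD(mb, ts_off, rte_mbuf_timestamp_t *);
    }
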
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 490693bff218..86955539bea8 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -474,7 +474,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 7efe7b50a206..af23f6a34e58 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -585,7 +585,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+					RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -995,7 +995,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	txep = (void *)txq->sw_ring;
 	txep += txq->tx_next_dd - (n - 1);
 
-	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
 		void **cache_objs;
 		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index f0f99265857e..b1d975b31a5a 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -248,23 +248,23 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 }
 
 #define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 #define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define ICE_RX_VECTOR_OFFLOAD (				\
-		DEV_RX_OFFLOAD_CHECKSUM |		\
-		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_RX_OFFLOAD_VLAN |			\
-		DEV_RX_OFFLOAD_RSS_HASH)
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_RX_OFFLOAD_VLAN |			\
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define ICE_VECTOR_PATH		0
 #define ICE_VECTOR_OFFLOAD_PATH	1
@@ -287,7 +287,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		return -1;
 
 	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
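
For clarity, the mask-based path selection these renamed flags feed into
reduces to the following sketch (simplified from the queue-check helpers in
this file; the real helpers also check ring thresholds):

    static int
    tx_path_select(uint64_t offloads)
    {
        if (offloads & ICE_TX_NO_VECTOR_FLAGS)
            return -1;                      /* fall back to the scalar path */
        if (offloads & ICE_TX_VECTOR_OFFLOAD)
            return ICE_VECTOR_OFFLOAD_PATH; /* vector Tx with offloads */
        return ICE_VECTOR_PATH;             /* plain vector Tx */
    }
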
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 653bd28b417c..117494131f32 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -479,7 +479,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * will cause performance drop to get into this context.
 		 */
 		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
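
All three vector Rx paths gate the hash extraction on
RTE_ETH_RX_OFFLOAD_RSS_HASH; the consumer side is unchanged by the rename. A
sketch of a fragment, where 'port_id', 'pkts' and BURST are assumptions and
use_hash() is a hypothetical callback (the mbuf flag is still named
PKT_RX_RSS_HASH at this point in the 21.11 cycle):

    uint16_t i, nb = rte_eth_rx_burst(port_id, 0, pkts, BURST);

    for (i = 0; i < nb; i++)
        if (pkts[i]->ol_flags & PKT_RX_RSS_HASH)
            use_hash(pkts[i]->hash.rss);  /* hash computed by the NIC */
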
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 2a1ed90b641b..7ce80a442b35 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -307,8 +307,8 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rx_mq_mode != ETH_MQ_RX_NONE &&
-		rx_mq_mode != ETH_MQ_RX_RSS) {
+	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
 		/* RSS together with VMDq is not supported */
 		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
 				rx_mq_mode);
@@ -318,7 +318,7 @@ igc_check_mq_mode(struct rte_eth_dev *dev)
 	/* To avoid breaking software that sets an invalid mode, only display
 	 * a warning if an invalid mode is used.
 	 */
-	if (tx_mq_mode != ETH_MQ_TX_NONE)
+	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
 		PMD_INIT_LOG(WARNING,
 			"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
 			tx_mq_mode);
@@ -334,8 +334,8 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	ret  = igc_check_mq_mode(dev);
 	if (ret != 0)
@@ -473,12 +473,12 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 		if (speed == SPEED_2500) {
 			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
@@ -490,9 +490,9 @@ eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		}
 	} else {
 		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -525,7 +525,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
 				" Port %d: Link Up - speed %u Mbps - %s",
 				dev->data->port_id,
 				(unsigned int)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 				"full-duplex" : "half-duplex");
 		else
 			PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -972,18 +972,18 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	/* VLAN Offload Settings */
 	eth_igc_vlan_offload_set(dev,
-		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK);
+		RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK);
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
 		hw->mac.autoneg = 1;
 	} else {
 		int num_speeds = 0;
 
-		if (*speeds & ETH_LINK_SPEED_FIXED) {
+		if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
 			PMD_DRV_LOG(ERR,
 				    "Force speed mode currently not supported");
 			igc_dev_clear_queues(dev);
@@ -993,33 +993,33 @@ eth_igc_start(struct rte_eth_dev *dev)
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.autoneg = 1;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_2_5G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
 			num_speeds++;
 		}
@@ -1482,14 +1482,14 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
-	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_vmdq_pools = 0;
 
 	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1515,9 +1515,9 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
 
 	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -2141,13 +2141,13 @@ eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2179,16 +2179,16 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		hw->fc.requested_mode = igc_fc_none;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		hw->fc.requested_mode = igc_fc_rx_pause;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		hw->fc.requested_mode = igc_fc_tx_pause;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		hw->fc.requested_mode = igc_fc_full;
 		break;
 	default:
@@ -2234,29 +2234,29 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* set redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta, reg;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to update the register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* check mask whether need to read the register value first */
@@ -2290,29 +2290,29 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint16_t i;
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR,
 			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
-			reta_size, ETH_RSS_RETA_SIZE_128);
+			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
-	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
 
 	/* read redirection table */
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta;
 		uint16_t idx, shift;
 		uint8_t j, mask;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to read register */
 		if (!mask ||
-		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* read register and get the queue index */
@@ -2369,23 +2369,23 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_hf = 0;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 
 	rss_conf->rss_hf |= rss_hf;
 	return 0;
@@ -2514,22 +2514,22 @@ eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			igc_vlan_hw_strip_enable(dev);
 		else
 			igc_vlan_hw_strip_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			igc_vlan_hw_filter_enable(dev);
 		else
 			igc_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			return igc_vlan_hw_extend_enable(dev);
 		else
 			return igc_vlan_hw_extend_disable(dev);
@@ -2547,7 +2547,7 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 	uint32_t reg_val;
 
 	/* only outer TPID of double VLAN can be configured*/
-	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
 		reg_val = IGC_READ_REG(hw, IGC_VET);
 		reg_val = (reg_val & (~IGC_VET_EXT)) |
 			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
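
The RTE_ETH_FC_* hunks map one-to-one onto the public flow-control API. A
minimal sketch of a fragment, assuming 'port_id' is valid, that forces full
(Rx+Tx) pause:

    struct rte_eth_fc_conf fc_conf;

    memset(&fc_conf, 0, sizeof(fc_conf));
    if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0 &&
        fc_conf.mode != RTE_ETH_FC_FULL) {
        fc_conf.mode = RTE_ETH_FC_FULL;
        rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    }
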
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index 5e6c2ff30157..f56cad79e939 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -66,37 +66,37 @@ extern "C" {
 #define IGC_TX_MAX_MTU_SEG	UINT8_MAX
 
 #define IGC_RX_OFFLOAD_ALL	(    \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_VLAN_FILTER | \
-	DEV_RX_OFFLOAD_VLAN_EXTEND | \
-	DEV_RX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_RX_OFFLOAD_UDP_CKSUM   | \
-	DEV_RX_OFFLOAD_TCP_CKSUM   | \
-	DEV_RX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_RX_OFFLOAD_KEEP_CRC    | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
-	DEV_TX_OFFLOAD_VLAN_INSERT | \
-	DEV_TX_OFFLOAD_IPV4_CKSUM  | \
-	DEV_TX_OFFLOAD_UDP_CKSUM   | \
-	DEV_TX_OFFLOAD_TCP_CKSUM   | \
-	DEV_TX_OFFLOAD_SCTP_CKSUM  | \
-	DEV_TX_OFFLOAD_TCP_TSO     | \
-	DEV_TX_OFFLOAD_UDP_TSO	   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM   | \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  | \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO     | \
+	RTE_ETH_TX_OFFLOAD_UDP_TSO	   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define IGC_RSS_OFFLOAD_ALL	(    \
-	ETH_RSS_IPV4               | \
-	ETH_RSS_NONFRAG_IPV4_TCP   | \
-	ETH_RSS_NONFRAG_IPV4_UDP   | \
-	ETH_RSS_IPV6               | \
-	ETH_RSS_NONFRAG_IPV6_TCP   | \
-	ETH_RSS_NONFRAG_IPV6_UDP   | \
-	ETH_RSS_IPV6_EX            | \
-	ETH_RSS_IPV6_TCP_EX        | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4               | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP   | \
+	RTE_ETH_RSS_IPV6               | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP   | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP   | \
+	RTE_ETH_RSS_IPV6_EX            | \
+	RTE_ETH_RSS_IPV6_TCP_EX        | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IGC_MAX_ETQF_FILTERS		3	/* etqf(3) is used for 1588 */
 #define IGC_ETQF_FILTER_1588		3
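
Usage note: these capability masks are built from the same renamed bits an
application puts into struct rte_eth_conf. A minimal sketch, assuming
'port_id' and one queue pair; the requested bits must stay a subset of the
advertised *_OFFLOAD_ALL sets above:

    struct rte_eth_conf conf = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_RSS,
            .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                        RTE_ETH_RX_OFFLOAD_RSS_HASH,
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6,
            },
        },
        .txmode = {
            .offloads = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
        },
    };

    rte_eth_dev_configure(port_id, 1, 1, &conf);
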
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index 56132e8c6cd6..1d34ae2e1b15 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -127,7 +127,7 @@ struct igc_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
 };
 
 /** Offload features */
@@ -209,7 +209,7 @@ struct igc_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
 	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
 };
 
 static inline uint64_t
@@ -847,23 +847,23 @@ igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
 }
@@ -1037,10 +1037,10 @@ igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	}
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		igc_rss_configure(dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/*
 		 * configure RSS register for following,
 		 * then disable the RSS logic
@@ -1111,7 +1111,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+		rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
 				RTE_ETHER_CRC_LEN : 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -1177,7 +1177,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	if (dev->data->scattered_rx) {
@@ -1221,20 +1221,20 @@ igc_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= IGC_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
 		rxcsum |= IGC_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_IPOFL;
 
 	if (offloads &
-		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+		(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		rxcsum |= IGC_RXCSUM_TUOFL;
-		offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
 	} else {
 		rxcsum &= ~IGC_RXCSUM_TUOFL;
 	}
 
-	if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+	if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
 		rxcsum |= IGC_RXCSUM_CRCOFL;
 	else
 		rxcsum &= ~IGC_RXCSUM_CRCOFL;
@@ -1242,7 +1242,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 	else
 		rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
@@ -1279,12 +1279,12 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
 		dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			dvmolr |= IGC_DVMOLR_STRVLAN;
 		else
 			dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-		if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			dvmolr &= ~IGC_DVMOLR_STRCRC;
 		else
 			dvmolr |= IGC_DVMOLR_STRCRC;
@@ -2253,10 +2253,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 	reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
 	if (on) {
 		reg_val |= IGC_DVMOLR_STRVLAN;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
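
(For reference, not part of the patch: a minimal application-side sketch
of the renamed RSS flags in use, assuming port_id, nb_rxq and nb_txq are
set up elsewhere.)

	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL, /* keep the driver default key */
				.rss_hf = RTE_ETH_RSS_IPV4 |
					RTE_ETH_RSS_NONFRAG_IPV4_TCP |
					RTE_ETH_RSS_NONFRAG_IPV4_UDP,
			},
		},
	};
	int ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);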
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index f94a1fed0a38..c688c3735c06 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -280,37 +280,37 @@ ionic_dev_link_update(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(link));
 
 	if (adapter->idev.port_info->config.an_enable) {
-		link.link_autoneg = ETH_LINK_AUTONEG;
+		link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	}
 
 	if (!adapter->link_up ||
 	    !(lif->state & IONIC_LIF_F_UP)) {
 		/* Interface is down */
-		link.link_status = ETH_LINK_DOWN;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else {
 		/* Interface is up */
-		link.link_status = ETH_LINK_UP;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		switch (adapter->link_speed) {
 		case  10000:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case  25000:
-			link.link_speed = ETH_SPEED_NUM_25G;
+			link.link_speed = RTE_ETH_SPEED_NUM_25G;
 			break;
 		case  40000:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case  50000:
-			link.link_speed = ETH_SPEED_NUM_50G;
+			link.link_speed = RTE_ETH_SPEED_NUM_50G;
 			break;
 		case 100000:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -387,17 +387,17 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
 
 	dev_info->speed_capa =
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	/*
 	 * Per-queue capabilities
 	 * RTE does not support disabling a feature on a queue if it is
 	 * enabled globally on the device. Thus the driver does not advertise
-	 * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+	 * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
 	 * though the driver would be otherwise capable of disabling it on
 	 * a per-queue basis.
 	 */
@@ -411,24 +411,24 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 	 */
 
 	dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_SCATTER |
-		DEV_RX_OFFLOAD_RSS_HASH |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_SCATTER |
+		RTE_ETH_RX_OFFLOAD_RSS_HASH |
 		0;
 
 	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
 		0;
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -463,9 +463,9 @@ ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		fc_conf->autoneg = 0;
 
 		if (idev->port_info->config.pause_type)
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf->mode = RTE_FC_NONE;
+			fc_conf->mode = RTE_ETH_FC_NONE;
 	}
 
 	return 0;
@@ -487,14 +487,14 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
 		break;
-	case RTE_FC_RX_PAUSE:
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		return -ENOTSUP;
 	}
 
@@ -545,12 +545,12 @@ ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = tbl_sz / RTE_RETA_GROUP_SIZE;
+	num = tbl_sz / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if (reta_conf[i].mask & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -585,12 +585,12 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-			&lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
-			RTE_RETA_GROUP_SIZE);
+			&lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+			RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -618,17 +618,17 @@ ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			IONIC_RSS_HASH_KEY_SIZE);
 
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -660,17 +660,17 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (!lif->rss_ind_tbl)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 			rss_types |= IONIC_RSS_TYPE_IPV4;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
-		if (rss_conf->rss_hf & ETH_RSS_IPV6)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 			rss_types |= IONIC_RSS_TYPE_IPV6;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 			rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
 
 		ionic_lif_rss_config(lif, rss_types, key, NULL);
@@ -842,15 +842,15 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)
 static inline uint32_t
 ionic_parse_link_speeds(uint16_t link_speeds)
 {
-	if (link_speeds & ETH_LINK_SPEED_100G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 		return 100000;
-	else if (link_speeds & ETH_LINK_SPEED_50G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 		return 50000;
-	else if (link_speeds & ETH_LINK_SPEED_40G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		return 40000;
-	else if (link_speeds & ETH_LINK_SPEED_25G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		return 25000;
-	else if (link_speeds & ETH_LINK_SPEED_10G)
+	else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		return 10000;
 	else
 		return 0;
@@ -874,12 +874,12 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	IONIC_PRINT_CALL();
 
 	allowed_speeds =
-		ETH_LINK_SPEED_FIXED |
-		ETH_LINK_SPEED_10G |
-		ETH_LINK_SPEED_25G |
-		ETH_LINK_SPEED_40G |
-		ETH_LINK_SPEED_50G |
-		ETH_LINK_SPEED_100G;
+		RTE_ETH_LINK_SPEED_FIXED |
+		RTE_ETH_LINK_SPEED_10G |
+		RTE_ETH_LINK_SPEED_25G |
+		RTE_ETH_LINK_SPEED_40G |
+		RTE_ETH_LINK_SPEED_50G |
+		RTE_ETH_LINK_SPEED_100G;
 
 	if (dev_conf->link_speeds & ~allowed_speeds) {
 		IONIC_PRINT(ERR, "Invalid link setting");
@@ -896,7 +896,7 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure link */
-	an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+	an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 	ionic_dev_cmd_port_autoneg(idev, an_enable);
 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
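
(A note on the FIXED bit, with a hypothetical application snippet:
RTE_ETH_LINK_SPEED_AUTONEG is 0, so leaving link_speeds empty requests
autonegotiation, while OR-ing RTE_ETH_LINK_SPEED_FIXED with a single
speed bit pins the link, which is what the an_enable check above
implements.)

	struct rte_eth_conf conf = { 0 };

	/* default: autonegotiate (RTE_ETH_LINK_SPEED_AUTONEG == 0) */
	conf.link_speeds = RTE_ETH_LINK_SPEED_AUTONEG;

	/* or pin the port at 25G with autoneg off */
	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_25G;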
diff --git a/drivers/net/ionic/ionic_ethdev.h b/drivers/net/ionic/ionic_ethdev.h
index 6cbcd0f825a3..652f28c97d57 100644
--- a/drivers/net/ionic/ionic_ethdev.h
+++ b/drivers/net/ionic/ionic_ethdev.h
@@ -8,12 +8,12 @@
 #include <rte_ethdev.h>
 
 #define IONIC_ETH_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
 	(eth_dev)->data->dev_private)
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index a1f9ce2d81cb..5e8fdf3893ad 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -1688,12 +1688,12 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
 
 	/*
 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
-	 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
 	 */
-	rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
 		else
 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
@@ -1733,19 +1733,19 @@ ionic_lif_configure(struct ionic_lif *lif)
 	/*
 	 * NB: While it is true that RSS_HASH is always enabled on ionic,
 	 *     setting this flag unconditionally causes problems in DTS.
-	 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	 */
 
 	/* RX per-port */
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
-	    rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 		lif->features |= IONIC_ETH_HW_RX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_RX_CSUM;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		lif->features |= IONIC_ETH_HW_RX_SG;
 		lif->eth_dev->data->scattered_rx = 1;
 	} else {
@@ -1754,30 +1754,30 @@ ionic_lif_configure(struct ionic_lif *lif)
 	}
 
 	/* Covers VLAN_STRIP */
-	ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
 
 	/* TX per-port */
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		lif->features |= IONIC_ETH_HW_TX_CSUM;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_CSUM;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
 	else
 		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		lif->features |= IONIC_ETH_HW_TX_SG;
 	else
 		lif->features &= ~IONIC_ETH_HW_TX_SG;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		lif->features |= IONIC_ETH_HW_TSO;
 		lif->features |= IONIC_ETH_HW_TSO_IPV6;
 		lif->features |= IONIC_ETH_HW_TSO_ECN;
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 4d16a39c6b6d..e3df7c56debe 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -203,11 +203,11 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 		txq->flags |= IONIC_QCQ_F_DEFERRED;
 
 	/* Convert the offload flags into queue flags */
-	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
-	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
-	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
 
 	eth_dev->data->tx_queues[tx_queue_id] = txq;
@@ -743,11 +743,11 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/*
 	 * Note: the interface does not currently support
-	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
 	 * when the adapter will be able to keep the CRC and subtract
 	 * it from the length for all received packets:
 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
-	 *     DEV_RX_OFFLOAD_KEEP_CRC)
+	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 	 *   rxq->crc_len = ETHER_CRC_LEN;
 	 */
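
(The crc_len handling the comment above anticipates would follow the igc
hunk earlier in this patch; a sketch, assuming a driver rxq with a
crc_len field:)

	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN; /* 4-byte FCS left on the mbuf */
	else
		rxq->crc_len = 0; /* FCS stripped by hardware */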
 
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 063a9c6a6f7f..17088585757f 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -50,11 +50,11 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->speed_capa =
 		(hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
-		ETH_LINK_SPEED_10G :
+		RTE_ETH_LINK_SPEED_10G :
 		((hw->retimer.mac_type ==
 			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
-		ETH_LINK_SPEED_25G :
-		ETH_LINK_SPEED_AUTONEG);
+		RTE_ETH_LINK_SPEED_25G :
+		RTE_ETH_LINK_SPEED_AUTONEG);
 
 	dev_info->max_rx_queues  = 1;
 	dev_info->max_tx_queues  = 1;
@@ -67,30 +67,30 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	};
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_QINQ_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_EXTEND |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
-
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_QINQ_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO |
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		DEV_TX_OFFLOAD_GRE_TNL_TSO |
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		DEV_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		dev_info->tx_queue_offload_capa;
 
 	dev_info->dev_capa =
@@ -2399,10 +2399,10 @@ ipn3ke_update_link(struct rte_rawdev *rawdev,
 				(uint64_t *)&link_speed);
 	switch (link_speed) {
 	case IFPGA_RAWDEV_LINK_SPEED_10GB:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case IFPGA_RAWDEV_LINK_SPEED_25GB:
-		link->link_speed = ETH_SPEED_NUM_25G;
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
 		IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
@@ -2460,9 +2460,9 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
@@ -2518,9 +2518,9 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
+				RTE_ETH_LINK_SPEED_FIXED);
 
 	rawdev = hw->rawdev;
 	ipn3ke_update_link(rawdev, rpst->port_id, &link);
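
(For comparison, the general shape of a PMD link update under the new
names; hw_speed_mbps() is a hypothetical helper returning Mb/s:)

	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = (hw_speed_mbps(dev) == 25000) ?
			RTE_ETH_SPEED_NUM_25G : RTE_ETH_SPEED_NUM_10G;
	rte_eth_linkstatus_set(dev, &link);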
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 46c95425adfb..7fd2c539e002 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1857,7 +1857,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	qinq &= IXGBE_DMATXCTL_GDV;
 
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (qinq) {
 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
@@ -1872,7 +1872,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				    " by single VLAN");
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (qinq) {
 			/* Only the high 16 bits are valid */
 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
@@ -1959,10 +1959,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -2083,7 +2083,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 			ctrl |= IXGBE_VLNCTRL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -2100,7 +2100,7 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 				ctrl |= IXGBE_RXDCTL_VME;
 				on = TRUE;
 			} else {
@@ -2122,17 +2122,17 @@ ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct ixgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -2143,19 +2143,18 @@ ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		ixgbe_vlan_hw_strip_config(dev);
-	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2194,10 +2193,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -2221,18 +2220,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -2242,12 +2241,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if no mq mode is configured, use the default scheme */
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -2256,12 +2255,12 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -2276,13 +2275,13 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdq+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2291,15 +2290,15 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -2308,39 +2307,39 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -2349,7 +2348,7 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB) {
 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
 				PMD_INIT_LOG(ERR,
@@ -2373,8 +2372,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = ixgbe_check_mq_mode(dev);
@@ -2619,15 +2618,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -2704,17 +2703,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_5G |
+			RTE_ETH_LINK_SPEED_10G;
 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-			allowed_speeds = ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+			allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 		break;
 	default:
-		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+		allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 	}
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
@@ -2728,7 +2727,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
@@ -2746,17 +2745,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
 		}
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= IXGBE_LINK_SPEED_100_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= IXGBE_LINK_SPEED_10_FULL;
 	}
 
@@ -3832,7 +3831,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * When DCB/VT is off, maximum number of queues changes,
 		 * except for 82598EB, which remains constant.
 		 */
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
 				hw->mac.type != ixgbe_mac_82598EB)
 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
 	}
@@ -3842,9 +3841,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
@@ -3883,21 +3882,21 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
 			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
-		dev_info->speed_capa = ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
 
 	if (hw->mac.type == ixgbe_mac_X540 ||
 	    hw->mac.type == ixgbe_mac_X540_vf ||
 	    hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550_vf) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 	}
 	if (hw->mac.type == ixgbe_mac_X550) {
-		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
-		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
 	}
 
 	/* Driver-preferred Rx/Tx parameters */
@@ -3966,9 +3965,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
+		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -4211,11 +4210,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	u32 esdp_reg;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -4237,8 +4236,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
 	if (diag != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -4274,37 +4273,37 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case IXGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 		break;
 
 	case IXGBE_LINK_SPEED_10_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 
 	case IXGBE_LINK_SPEED_100_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case IXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case IXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
 
@@ -4521,7 +4520,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -4740,13 +4739,13 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -5044,8 +5043,8 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5092,8 +5091,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
 						IXGBE_4_BIT_MASK);
 		if (!mask)
@@ -5255,22 +5254,22 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -5330,8 +5329,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	ixgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = ixgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -5568,10 +5567,10 @@ ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports hw strip feature, others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
@@ -5702,12 +5701,12 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 		}
@@ -5721,15 +5720,15 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= IXGBE_VMOLR_AUPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= IXGBE_VMOLR_ROMPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= IXGBE_VMOLR_ROPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= IXGBE_VMOLR_BAM;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= IXGBE_VMOLR_MPE;
 
 	return new_val;
@@ -6724,15 +6723,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = IXGBE_INCVAL_1GB;
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -7143,16 +7142,16 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		return ETH_RSS_RETA_SIZE_512;
+		return RTE_ETH_RSS_RETA_SIZE_512;
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
-		return ETH_RSS_RETA_SIZE_64;
+		return RTE_ETH_RSS_RETA_SIZE_64;
 	case ixgbe_mac_X540_vf:
 	case ixgbe_mac_82599_vf:
 		return 0;
 	default:
-		return ETH_RSS_RETA_SIZE_128;
+		return RTE_ETH_RSS_RETA_SIZE_128;
 	}
 }
 
@@ -7162,10 +7161,10 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
-		if (reta_idx < ETH_RSS_RETA_SIZE_128)
+		if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
 			return IXGBE_RETA(reta_idx >> 2);
 		else
-			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+			return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
 	case ixgbe_mac_X550_vf:
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
@@ -7221,7 +7220,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -7232,7 +7231,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -7256,9 +7255,9 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7271,7 +7270,7 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -7524,7 +7523,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -7556,7 +7555,7 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -7653,12 +7652,12 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
 
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
@@ -7690,11 +7689,11 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
 		ret = -EINVAL;
 		break;
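
(RTE_ETH_RETA_GROUP_SIZE keeps the value 64, so the idx/shift arithmetic
above is unchanged. An application-side sketch of the same indexing,
assuming reta_size, nb_rxq and port_id come from the usual setup:)

	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
						RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_rxq; /* round-robin spread */
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);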
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 950fb2d2450c..876b670f2682 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -114,15 +114,15 @@
 #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE    0x0
 
 #define IXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf irq enable mask */
 #define IXGBE_VF_MAXMSIVECTOR           1
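
(If the old spellings are kept as aliases during the deprecation window,
the compatibility layer can be as simple as the hypothetical excerpt
below; the real definitions belong in lib/ethdev, and these three lines
are illustrative only.)

	#define ETH_RSS_IPV4			RTE_ETH_RSS_IPV4
	#define DEV_RX_OFFLOAD_VLAN_STRIP	RTE_ETH_RX_OFFLOAD_VLAN_STRIP
	#define ETH_LINK_SPEED_10G		RTE_ETH_LINK_SPEED_10G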
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 27a49bbce5e7..7894047829a8 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -90,9 +90,9 @@ static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 				 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
+		enum rte_eth_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
 			uint32_t fdircmd, uint32_t fdirhash,
@@ -163,20 +163,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
 {
 	*fdirctrl = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
 		break;
@@ -807,13 +807,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		return ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				PERFECT_BUCKET_128KB_HASH_MASK;
@@ -850,15 +850,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash = ixgbe_atr_compute_hash_82599(input,
 				IXGBE_ATR_BUCKET_HASH_KEY) &
 				SIG_BUCKET_128KB_HASH_MASK;
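
(The renamed pballoc type as seen from configuration code; a sketch
limited to the one field these hunks touch:)

	struct rte_eth_fdir_conf fdir_conf = { 0 };

	fdir_conf.pballoc = RTE_ETH_FDIR_PBALLOC_64K; /* 8k - 1 signature filters */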
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 27322ab9038a..bdc9d4796c02 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1259,7 +1259,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index e45c5501e6bf..944c9f23809e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -392,7 +392,7 @@ ixgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -400,7 +400,7 @@ ixgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -633,11 +633,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -657,7 +657,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
@@ -665,7 +665,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 295e5a39b245..9f1bd0a62ba4 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -104,15 +104,15 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -263,15 +263,15 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
 		gpie |= IXGBE_GPIE_VTMODE_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
 		gpie |= IXGBE_GPIE_VTMODE_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
 		gpie |= IXGBE_GPIE_VTMODE_16;
 		break;
@@ -674,29 +674,29 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
 		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a51450fe5b82..aa3a406c204d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2592,26 +2592,26 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2780,7 +2780,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/*
@@ -3021,7 +3021,7 @@ ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (hw->mac.type != ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return offloads;
 }
@@ -3032,19 +3032,19 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t offloads;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_SCATTER |
-		   DEV_RX_OFFLOAD_RSS_HASH;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (ixgbe_is_vf(dev) == 0)
-		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 
 	/*
 	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -3054,20 +3054,20 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	     hw->mac.type == ixgbe_mac_X540 ||
 	     hw->mac.type == ixgbe_mac_X550) &&
 	    !RTE_ETH_DEV_SRIOV(dev).active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
-		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -3122,7 +3122,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -3507,23 +3507,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	/* Set configured hashing protocols in MRQC register */
 	rss_hf = rss_conf->rss_hf;
 	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+	if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
@@ -3605,23 +3605,23 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_hf = 0;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
 	return 0;
 }
@@ -3697,12 +3697,12 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		ixgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * RXPBSIZE
@@ -3727,7 +3727,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -3736,7 +3736,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	}
 
 	/* MRQC: enable vmdq and dcb */
-	mrqc = (num_pools == ETH_16_POOLS) ?
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
 		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
@@ -3752,7 +3752,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3776,7 +3776,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 16 or 32 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*
 	 * MPSAR - allow pools to read specific mac addresses
@@ -3858,7 +3858,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	if (hw->mac.type != ixgbe_mac_82598EB)
 		/*PF VF Transmit Enable*/
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
-			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	ixgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3874,12 +3874,12 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3889,7 +3889,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3907,12 +3907,12 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3922,7 +3922,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3949,7 +3949,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3976,7 +3976,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -4145,7 +4145,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		if (hw->mac.type != ixgbe_mac_82598EB) {
 			config_dcb_rx = DCB_RX_CONFIG;
@@ -4158,8 +4158,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			ixgbe_vmdq_dcb_configure(dev);
 		}
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -4172,7 +4172,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -4183,7 +4183,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/*get DCB TX configuration parameters from rte_eth_conf*/
@@ -4199,15 +4199,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
-			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -4257,9 +4257,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 		}
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		}
 	}
 	if (config_dcb_tx) {
 		/* Only support an equally distributed
@@ -4273,7 +4272,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
 		}
@@ -4309,7 +4308,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
@@ -4323,7 +4322,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = ixgbe_dcb_pfc_enabled;
 		}
 		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -4344,12 +4343,12 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -4405,7 +4404,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* VFRE: pool enabling for receive - 64 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
 
 	/*
@@ -4526,11 +4525,11 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
 		break;
 
@@ -4551,17 +4550,17 @@ ixgbe_config_vf_default(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQEN);
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT4TCEN);
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
 			IXGBE_MRQC_VMDQRT8TCEN);
 		break;
@@ -4588,21 +4587,21 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			ixgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			ixgbe_rss_disable(dev);
@@ -4613,18 +4612,18 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			ixgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4658,7 +4657,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			ixgbe_vmdq_tx_hw_configure(hw);
 		else {
 			mtqc = IXGBE_MTQC_64Q_1PB;
@@ -4671,13 +4670,13 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
 				IXGBE_MTQC_8TC_8TQ;
 			break;
@@ -4885,7 +4884,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		rxq->rx_using_sse = rx_using_sse;
 #ifdef RTE_LIB_SECURITY
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 #endif
 	}
 }
@@ -4913,10 +4912,10 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4924,8 +4923,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4939,7 +4938,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 	else
 		rfctl |= IXGBE_RFCTL_RSC_DIS;
@@ -4948,7 +4947,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -5070,7 +5069,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -5107,7 +5106,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5116,7 +5115,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -5158,11 +5157,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -5177,7 +5176,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -5187,7 +5186,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5393,9 +5392,9 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 #ifdef RTE_LIB_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_SECURITY) ||
+			RTE_ETH_RX_OFFLOAD_SECURITY) ||
 		(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY)) {
+			RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = ixgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -5681,7 +5680,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first .
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5730,7 +5729,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
@@ -5738,8 +5737,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index a1764f2b08af..668a5b9814f6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -133,7 +133,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_udp_csum_zero_err;
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -227,7 +227,7 @@ struct ixgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 005e60668a8b..cd34d4098785 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -277,7 +277,7 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index ae03ea6e9db3..ac8976062fa7 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -119,14 +119,14 @@ ixgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -375,10 +375,10 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -392,7 +392,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index 9fa75984fb31..bd528ff346c7 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -58,20 +58,20 @@ ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	/**< Maximum number of MAC addresses. */
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |	DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 	/**< Device RX offload capabilities. */
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	/**< Device TX offload capabilities. */
 
 	dev_info->speed_capa =
 		representor->pf_ethdev->data->dev_link.link_speed;
-	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	/**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 
 	dev_info->switch_info.name =
 		representor->pf_ethdev->device->name;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index cf089cd9aee5..9729f8575f53 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -303,10 +303,10 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
 	 */
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_16_POOLS;
+				  RTE_ETH_16_POOLS;
 	else
 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_64_POOLS;
+				  RTE_ETH_64_POOLS;
 
 	for (q = 0; q < queues_per_pool; q++)
 		(*dev->dev_ops->vlan_strip_queue_set)(dev,
@@ -736,14 +736,14 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
 	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 	eth_conf = &dev->data->dev_conf;
 
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 90fc8160b1f8..eef6f6661c74 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -285,8 +285,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
 * @param rx_mask
 *    The RX mode mask, which is one or more of accepting Untagged Packets,
 *    packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-*    ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-*    ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+*    RTE_ETH_VMDQ_ACCEPT_UNTAG, RTE_ETH_VMDQ_ACCEPT_HASH_UC,
+*    RTE_ETH_VMDQ_ACCEPT_BROADCAST and RTE_ETH_VMDQ_ACCEPT_MULTICAST will be used
 *    in rx_mode.
 * @param on
 *    1 - Enable a VF RX mode.
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index cb9f7c8e8200..c428caf44189 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,10 @@ struct pmd_internals {
 };
 
 static const struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 static int is_kni_initialized;
 
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 0fc3f0ab66a9..90ffe31b9fda 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -384,15 +384,15 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		break;
 	/* CN23xx 25G cards */
 	case PCI_SUBSYS_DEV_ID_CN2350_225:
 	case PCI_SUBSYS_DEV_ID_CN2360_225:
-		devinfo->speed_capa = ETH_LINK_SPEED_25G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;
 		break;
 	default:
-		devinfo->speed_capa = ETH_LINK_SPEED_10G;
+		devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 		lio_dev_err(lio_dev,
 			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
 		return -EINVAL;
@@ -406,27 +406,27 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	devinfo->max_mac_addrs = 1;
 
-	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_RX_OFFLOAD_UDP_CKSUM		|
-				    DEV_RX_OFFLOAD_TCP_CKSUM		|
-				    DEV_RX_OFFLOAD_VLAN_STRIP		|
-				    DEV_RX_OFFLOAD_RSS_HASH);
-	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
-				    DEV_TX_OFFLOAD_UDP_CKSUM		|
-				    DEV_TX_OFFLOAD_TCP_CKSUM		|
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+	devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH);
+	devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
 
 	devinfo->rx_desc_lim = lio_rx_desc_lim;
 	devinfo->tx_desc_lim = lio_tx_desc_lim;
 
 	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
 	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
-	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
-					   ETH_RSS_NONFRAG_IPV4_TCP	|
-					   ETH_RSS_IPV6			|
-					   ETH_RSS_NONFRAG_IPV6_TCP	|
-					   ETH_RSS_IPV6_EX		|
-					   ETH_RSS_IPV6_TCP_EX);
+	devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4			|
+					   RTE_ETH_RSS_NONFRAG_IPV4_TCP	|
+					   RTE_ETH_RSS_IPV6			|
+					   RTE_ETH_RSS_NONFRAG_IPV6_TCP	|
+					   RTE_ETH_RSS_IPV6_EX		|
+					   RTE_ETH_RSS_IPV6_TCP_EX);
 	return 0;
 }
 
@@ -519,10 +519,10 @@ lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
 	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
 	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
 
-	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
-				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
 				rss_state->itable[index] = reta_conf[i].reta[j];
 			}
 		}
@@ -562,12 +562,12 @@ lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	num = reta_size / RTE_RETA_GROUP_SIZE;
+	num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < num; i++) {
 		memcpy(reta_conf->reta,
-		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
-		       RTE_RETA_GROUP_SIZE);
+		       &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE],
+		       RTE_ETH_RETA_GROUP_SIZE);
 		reta_conf++;
 	}
 
@@ -595,17 +595,17 @@ lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
 
 	if (rss_state->ip)
-		rss_hf |= ETH_RSS_IPV4;
+		rss_hf |= RTE_ETH_RSS_IPV4;
 	if (rss_state->tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	if (rss_state->ipv6)
-		rss_hf |= ETH_RSS_IPV6;
+		rss_hf |= RTE_ETH_RSS_IPV6;
 	if (rss_state->ipv6_tcp_hash)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (rss_state->ipv6_ex)
-		rss_hf |= ETH_RSS_IPV6_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_EX;
 	if (rss_state->ipv6_tcp_ex_hash)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	rss_conf->rss_hf = rss_hf;
 
@@ -673,42 +673,42 @@ lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		if (rss_state->hash_disable)
 			return -EINVAL;
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 			hashinfo |= LIO_RSS_HASH_IPV4;
 			rss_state->ip = 1;
 		} else {
 			rss_state->ip = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
 			rss_state->tcp_hash = 1;
 		} else {
 			rss_state->tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {
 			hashinfo |= LIO_RSS_HASH_IPV6;
 			rss_state->ipv6 = 1;
 		} else {
 			rss_state->ipv6 = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
 			rss_state->ipv6_tcp_hash = 1;
 		} else {
 			rss_state->ipv6_tcp_hash = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {
 			hashinfo |= LIO_RSS_HASH_IPV6_EX;
 			rss_state->ipv6_ex = 1;
 		} else {
 			rss_state->ipv6_ex = 0;
 		}
 
-		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+		if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {
 			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
 			rss_state->ipv6_tcp_ex_hash = 1;
 		} else {
@@ -757,7 +757,7 @@ lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -814,7 +814,7 @@ lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
 	if (udp_tnl == NULL)
 		return -EINVAL;
 
-	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+	if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
 		return -1;
 	}
@@ -912,10 +912,10 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	/* Initialize */
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	/* Return what we found */
 	if (lio_dev->linfo.link.s.link_up == 0) {
@@ -923,18 +923,18 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		return rte_eth_linkstatus_set(eth_dev, &link);
 	}
 
-	link.link_status = ETH_LINK_UP; /* Interface is up */
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP; /* Interface is up */
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	switch (lio_dev->linfo.link.s.speed) {
 	case LIO_LINK_SPEED_10000:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case LIO_LINK_SPEED_25000:
-		link.link_speed = ETH_SPEED_NUM_25G;
+		link.link_speed = RTE_ETH_SPEED_NUM_25G;
 		break;
 	default:
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	}
 
 	return rte_eth_linkstatus_set(eth_dev, &link);
@@ -1086,8 +1086,8 @@ lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
 
 		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
 				  i % eth_dev->data->nb_rx_queues : 0);
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
+		conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
 		reta_conf[conf_idx].reta[reta_idx] = q_idx;
 		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
 	}
@@ -1103,10 +1103,10 @@ lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rss_conf rss_conf;
 
 	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		lio_dev_rss_configure(eth_dev);
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 	/* if mq_mode is none, disable rss mode. */
 	default:
 		memset(&rss_conf, 0, sizeof(rss_conf));
@@ -1484,7 +1484,7 @@ lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 1;
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -1505,11 +1505,11 @@ lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	}
 
 	lio_dev->linfo.link.s.link_up = 0;
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
 		lio_dev->linfo.link.s.link_up = 1;
-		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		lio_dev_err(lio_dev, "Unable to set Link Down\n");
 		return -1;
 	}
@@ -1721,9 +1721,9 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		eth_dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_RSS_HASH;
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Inform firmware about change in number of queues to use.
 	 * Disable IO queues and reset registers for re-configuration.
diff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c
index 364e818d65c1..8533e39f6957 100644
--- a/drivers/net/memif/memif_socket.c
+++ b/drivers/net/memif/memif_socket.c
@@ -525,7 +525,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
 
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 980150293e86..9deb7a5f1360 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -55,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION		"memif_mp_send_region"
@@ -199,7 +199,7 @@ memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *de
 	dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1219,7 +1219,7 @@ memif_connect(struct rte_eth_dev *dev)
 
 		pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 		pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	}
 	MIF_LOG(INFO, "Connected.");
 	return 0;
@@ -1381,10 +1381,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		proc_private = dev->process_private;
-		if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+		if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
 				proc_private->regions_num == 0) {
 			memif_mp_request_regions(dev);
-		} else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+		} else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
 				proc_private->regions_num > 0) {
 			memif_free_regions(dev);
 		}
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 783ff94dce8d..d606ec8ca76d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -657,11 +657,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->if_index = priv->if_index;
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
 	info->speed_capa =
-			ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_20G |
-			ETH_LINK_SPEED_40G |
-			ETH_LINK_SPEED_56G;
+			RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_20G |
+			RTE_ETH_LINK_SPEED_40G |
+			RTE_ETH_LINK_SPEED_56G;
 	info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
 
 	return 0;
@@ -821,13 +821,13 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		dev_link.link_speed = link_speed;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	dev->data->dev_link = dev_link;
 	return 0;
 }
@@ -863,13 +863,13 @@ mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	ret = 0;
 out:
 	MLX4_ASSERT(ret >= 0);
@@ -899,13 +899,13 @@ mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 71ea91b3fb82..2e1b6c87e983 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -109,21 +109,21 @@ mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
 	};
 	static const uint64_t dpdk[] = {
 		[INNER] = 0,
-		[IPV4] = ETH_RSS_IPV4,
-		[IPV4_1] = ETH_RSS_FRAG_IPV4,
-		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
-		[IPV6] = ETH_RSS_IPV6,
-		[IPV6_1] = ETH_RSS_FRAG_IPV6,
-		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
-		[IPV6_3] = ETH_RSS_IPV6_EX,
+		[IPV4] = RTE_ETH_RSS_IPV4,
+		[IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
+		[IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IPV6] = RTE_ETH_RSS_IPV6,
+		[IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
+		[IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IPV6_3] = RTE_ETH_RSS_IPV6_EX,
 		[TCP] = 0,
 		[UDP] = 0,
-		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
-		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
-		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
-		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
-		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
-		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+		[IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+		[IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+		[IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+		[IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
+		[IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+		[IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
 	};
 	static const uint64_t verbs[RTE_DIM(dpdk)] = {
 		[INNER] = IBV_RX_HASH_INNER,
@@ -1283,7 +1283,7 @@ mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
  * - MAC flow rules are generated from @p dev->data->mac_addrs
  *   (@p priv->mac array).
  * - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
  *   is enabled and VLAN filters are configured.
  *
  * @param priv
@@ -1358,7 +1358,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 	struct rte_ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
 		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
+		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 		!ETH_DEV(priv)->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index d56009c41845..2aab0f60a7b5 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -118,7 +118,7 @@ mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
 static void
 mlx4_link_status_alarm(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	MLX4_ASSERT(priv->intr_alarm == 1);
@@ -183,7 +183,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
 	};
 	uint32_t caught[RTE_DIM(type)] = { 0 };
 	struct ibv_async_event event;
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	unsigned int i;
 
@@ -280,7 +280,7 @@ mlx4_intr_uninstall(struct mlx4_priv *priv)
 int
 mlx4_intr_install(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 	int rc;
 
@@ -386,7 +386,7 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx4_rxq_intr_enable(struct mlx4_priv *priv)
 {
-	const struct rte_intr_conf *const intr_conf =
+	const struct rte_eth_intr_conf *const intr_conf =
 		&ETH_DEV(priv)->data->dev_conf.intr_conf;
 
 	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index ee2d2b75e59a..781ee256df71 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -682,12 +682,12 @@ mlx4_rxq_detach(struct rxq *rxq)
 uint64_t
 mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
-			    DEV_RX_OFFLOAD_KEEP_CRC |
-			    DEV_RX_OFFLOAD_RSS_HASH;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+			    RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (priv->hw_csum)
-		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return offloads;
 }
 
@@ -703,7 +703,7 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 uint64_t
 mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	(void)priv;
 	return offloads;
@@ -785,7 +785,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
 	crc_present = 0;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (priv->hw_fcs_strip) {
 			crc_present = 1;
 		} else {
@@ -816,9 +816,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -832,7 +832,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 	if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
 		uint32_t sges_n;
 
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 7d8c4f2a2223..0db2e55befd3 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -273,20 +273,20 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 uint64_t
 mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+	uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (priv->hw_csum) {
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	}
 	if (priv->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (priv->hw_csum_l2tun) {
-		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (priv->tso)
-			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	}
 	return offloads;
 }
@@ -394,12 +394,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					   DEV_TX_OFFLOAD_UDP_CKSUM |
-					   DEV_TX_OFFLOAD_TCP_CKSUM)),
+			(offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
 			      (offloads &
-			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+			       RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index f34133e2c641..79e27fe2d668 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -439,24 +439,24 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
 	}
 	link_speed = ethtool_cmd_speed(&edata);
 	if (link_speed == -1)
-		dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	else
 		dev_link.link_speed = link_speed;
 	priv->link_speed_capa = 0;
 	if (edata.supported & (SUPPORTED_1000baseT_Full |
 			       SUPPORTED_1000baseKX_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (edata.supported & SUPPORTED_10000baseKR_Full)
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
 			       SUPPORTED_40000baseCR4_Full |
 			       SUPPORTED_40000baseSR4_Full |
 			       SUPPORTED_40000baseLR4_Full))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -545,45 +545,45 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		return ret;
 	}
 	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
-				ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+				RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
 	sc = ecmd->link_mode_masks[0] |
 		((uint64_t)ecmd->link_mode_masks[1] << 32);
 	priv->link_speed_capa = 0;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	sc = ecmd->link_mode_masks[2] |
 		((uint64_t)ecmd->link_mode_masks[3] << 32);
@@ -591,11 +591,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 		  MLX5_BITSHIFT
 		       (ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
-		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
-				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				  ETH_LINK_SPEED_FIXED);
+				  RTE_ETH_LINK_SPEED_FIXED);
 	*link = dev_link;
 	return 0;
 }
@@ -677,13 +677,13 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	fc_conf->autoneg = ethpause.autoneg;
 	if (ethpause.rx_pause && ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (ethpause.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (ethpause.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -709,14 +709,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	ifr.ifr_data = (void *)&ethpause;
 	ethpause.autoneg = fc_conf->autoneg;
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
 		ethpause.rx_pause = 1;
 	else
 		ethpause.rx_pause = 0;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
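
Note: the two hunks above are the standard mapping between RTE_ETH_FC_*
modes and the rx/tx pause bits. For reference, the same mapping as a
stand-alone helper (a sketch, not part of the patch):

	static enum rte_eth_fc_mode
	pause_to_fc_mode(int rx_pause, int tx_pause)
	{
		if (rx_pause && tx_pause)
			return RTE_ETH_FC_FULL;
		if (rx_pause)
			return RTE_ETH_FC_RX_PAUSE;
		if (tx_pause)
			return RTE_ETH_FC_TX_PAUSE;
		return RTE_ETH_FC_NONE;
	}
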
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 111a7597317a..23d9e0a476ac 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1310,8 +1310,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
@@ -1594,7 +1594,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/*
 	 * If HW has bug working with tunnel packet decapsulation and
 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
 	 */
 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
 		config->hw_fcs_strip = 0;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 7263d354b180..3a9b716e438c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1704,10 +1704,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
 			 struct rte_eth_udp_tunnel *udp_tunnel)
 {
 	MLX5_ASSERT(udp_tunnel != NULL);
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
 	    udp_tunnel->udp_port == 4789)
 		return 0;
-	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
 	    udp_tunnel->udp_port == 4790)
 		return 0;
 	return -ENOTSUP;
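
Note: with the rename, applications registering tunnel ports use the
RTE_ETH_TUNNEL_TYPE_* names. A minimal sketch against the hunk above
(port_id assumed valid; 4789 is the IANA VXLAN port the driver accepts):

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	if (rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel) != 0)
		/* mlx5 accepts only 4789 (VXLAN) and 4790 (VXLAN-GPE). */
		return -1;
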
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 42cacd0bbe3b..52f03ada2ced 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1233,7 +1233,7 @@ TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
 struct mlx5_flow_rss_desc {
 	uint32_t level;
 	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index fe86bb40d351..12ddf4c7ff28 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -90,11 +90,11 @@
 #define MLX5_VPMD_DESCS_PER_LOOP      4
 
 /* Mask of RSS on source only or destination only. */
-#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
-			       ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+			       RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
 			    MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
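
Note: MLX5_RSS_HF_MASK above is the complement of what the PMD supports,
so the driver can validate a request with a single AND. The equivalent
check from the application side goes through dev_info (a sketch; the
requested value is illustrative):

	uint64_t requested = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP;
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (requested & ~dev_info.flow_type_rss_offloads)
		/* Trim RSS types the port cannot hash on. */
		requested &= dev_info.flow_type_rss_offloads;
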
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 82e2284d9866..f2b78c3cc69e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -91,7 +91,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	if ((dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
+			RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
 			rte_mbuf_dyn_tx_timestamp_register(NULL, NULL) != 0) {
 		DRV_LOG(ERR, "port %u cannot register Tx timestamp field/flag",
 			dev->data->port_id);
@@ -225,8 +225,8 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->default_txportconf.ring_size = 256;
 	info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
 	info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
-	if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
-		(priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
+	if ((priv->link_speed_capa & RTE_ETH_LINK_SPEED_200G) |
+		(priv->link_speed_capa & RTE_ETH_LINK_SPEED_100G)) {
 		info->default_rxportconf.nb_queues = 16;
 		info->default_txportconf.nb_queues = 16;
 		if (dev->data->nb_rx_queues > 2 ||
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 002449e993e7..d645fd48647e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
 	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
 	 */
 	uint64_t node_flags;
 	/**<
@@ -298,7 +298,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -560,8 +560,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_IPV4,
 			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -569,11 +569,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -584,8 +584,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 			 MLX5_EXPANSION_GRE,
 			 MLX5_EXPANSION_NVGRE),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -593,11 +593,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 						  MLX5_EXPANSION_MPLS,
 						  MLX5_EXPANSION_GTP),
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_VXLAN] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -659,32 +659,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
 						  MLX5_EXPANSION_IPV4_TCP),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
-		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-			ETH_RSS_NONFRAG_IPV4_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	},
 	[MLX5_EXPANSION_IPV4_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	},
 	[MLX5_EXPANSION_IPV4_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	},
 	[MLX5_EXPANSION_IPV6] = {
 		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
 						  MLX5_EXPANSION_IPV6_TCP,
 						  MLX5_EXPANSION_IPV6_FRAG_EXT),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
-		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-			ETH_RSS_NONFRAG_IPV6_OTHER,
+		.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
 	},
 	[MLX5_EXPANSION_IPV6_UDP] = {
 		.type = RTE_FLOW_ITEM_TYPE_UDP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
 	},
 	[MLX5_EXPANSION_IPV6_TCP] = {
 		.type = RTE_FLOW_ITEM_TYPE_TCP,
-		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+		.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
 		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
@@ -1100,7 +1100,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1653,14 +1653,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
-	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-	    !(rss->types & ETH_RSS_IP))
+	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & RTE_ETH_RSS_IP))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L3 partial RSS requested but L3 RSS"
 					  " type not specified");
-	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
@@ -6427,8 +6427,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 * mlx5_flow_hashfields_adjust() in advance.
 		 */
 		rss_desc->level = rss->level;
-		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+		/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+		rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	}
 	flow->dev_handles = 0;
 	if (rss && rss->types) {
@@ -7126,7 +7126,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	if (!priv->reta_idx_n || !priv->rxqs_n) {
 		return 0;
 	}
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
@@ -8794,7 +8794,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 				(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+		if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 			ctx->action_rss.types = 0;
 		for (i = 0; i != priv->reta_idx_n; ++i)
 			ctx->queue[i] = (*priv->reta_idx)[i];
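
Note: two hunks above repeat the ethdev convention that rss->types == 0
means "default" and is expanded to RTE_ETH_RSS_IP. In rte_flow terms
that is (a sketch; queues/nb_queues are assumed, attr and pattern
omitted):

	struct rte_flow_action_rss rss = {
		.types = 0,	/* 0 == default; the PMD expands it to RTE_ETH_RSS_IP */
		.key = NULL,	/* NULL == default RSS key */
		.queue_num = nb_queues,
		.queue = queues,
	};
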
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f1a83d537d0c..4a16f30fb7a6 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -331,18 +331,18 @@ enum mlx5_feature_name {
 
 /* Valid layer type for IPV4 RSS. */
 #define MLX5_IPV4_LAYER_TYPES \
-	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
-	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
-	 ETH_RSS_NONFRAG_IPV4_OTHER)
+	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
 
 /* IBV hash source bits  for IPV4. */
 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 
 /* Valid layer type for IPV6 RSS. */
 #define MLX5_IPV6_LAYER_TYPES \
-	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
-	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX  | ETH_RSS_IPV6_TCP_EX | \
-	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
+	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
 
 /* IBV hash source bits  for IPV6. */
 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5bd90bfa2818..c4a5706532a9 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10862,9 +10862,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
 			else
 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10872,9 +10872,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
 			else
 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10888,11 +10888,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		return;
 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-		if (rss_types & ETH_RSS_UDP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_UDP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_UDP;
 			else
@@ -10900,11 +10900,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
 		}
 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-		if (rss_types & ETH_RSS_TCP) {
-			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+		if (rss_types & RTE_ETH_RSS_TCP) {
+			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_SRC_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				dev_flow->hash_fields |=
 						IBV_RX_HASH_DST_PORT_TCP;
 			else
@@ -14444,9 +14444,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4:
 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV4;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV4;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV4;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14455,9 +14455,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV6:
 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
 			*hash_field &= ~MLX5_RSS_HASH_IPV6;
-			if (rss_types & ETH_RSS_L3_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_IPV6;
-			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_IPV6;
 			else
 				*hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14466,11 +14466,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_UDP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_UDP:
-		if (rss_types & ETH_RSS_UDP) {
+		if (rss_types & RTE_ETH_RSS_UDP) {
 			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
 			else
 				*hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14479,11 +14479,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
 	case MLX5_RSS_HASH_IPV4_TCP:
 		/* fall-through. */
 	case MLX5_RSS_HASH_IPV6_TCP:
-		if (rss_types & ETH_RSS_TCP) {
+		if (rss_types & RTE_ETH_RSS_TCP) {
 			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-			if (rss_types & ETH_RSS_L4_DST_ONLY)
+			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
 				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
 				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
 			else
 				*hash_field |= MLX5_TCP_IBV_RX_HASH;
@@ -14631,8 +14631,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
 	origin = &shared_rss->origin;
 	origin->func = rss->func;
 	origin->level = rss->level;
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+	/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+	origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 892abcb65779..f9010a674d7f 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1824,7 +1824,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_TCP,
+					(rss_desc, tunnel, RTE_ETH_RSS_TCP,
 					 (IBV_RX_HASH_SRC_PORT_TCP |
 					  IBV_RX_HASH_DST_PORT_TCP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1837,7 +1837,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 			if (dev_flow->hash_fields != 0)
 				dev_flow->hash_fields |=
 					mlx5_flow_hashfields_adjust
-					(rss_desc, tunnel, ETH_RSS_UDP,
+					(rss_desc, tunnel, RTE_ETH_RSS_UDP,
 					 (IBV_RX_HASH_SRC_PORT_UDP |
 					  IBV_RX_HASH_DST_PORT_UDP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index c32129cdc2b8..a4f690039e24 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -68,7 +68,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 		if (!(*priv->rxqs)[i])
 			continue;
 		(*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
-			!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+			!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
 		++idx;
 	}
 	return 0;
@@ -170,8 +170,8 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
 	}
 	/* Fill each entry of the table even if its bit is not set. */
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 			(*priv->reta_idx)[i];
 	}
 	return 0;
@@ -209,8 +209,8 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 	for (idx = 0, i = 0; (i != reta_size); ++i) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (((reta_conf[idx].mask >> i) & 0x1) == 0)
 			continue;
 		MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
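
Note: the idx/pos arithmetic above is the standard way to walk
rte_eth_rss_reta_entry64 arrays. A sketch of building an even queue
spread from the application side (port_id and nb_queues assumed):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE] = {0};
	uint16_t i;

	rte_eth_dev_info_get(port_id, &dev_info);
	for (i = 0; i < dev_info.reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << pos;
		reta_conf[idx].reta[pos] = i % nb_queues;
	}
	rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
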
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 60673d014d02..14b9991c5fa8 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -333,22 +333,22 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
-	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_TIMESTAMP |
-			     DEV_RX_OFFLOAD_RSS_HASH);
+	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
 	if (!config->mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
-		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (config->hw_csum)
-		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-			     DEV_RX_OFFLOAD_UDP_CKSUM |
-			     DEV_RX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -362,7 +362,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 uint64_t
 mlx5_get_rx_port_offloads(void)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	return offloads;
 }
@@ -694,7 +694,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				    dev->data->dev_conf.rxmode.offloads;
 
 		/* The offloads should be checked on rte_eth_dev layer. */
-		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
 			DRV_LOG(ERR, "port %u queue index %u split "
 				     "offload not configured",
@@ -1336,7 +1336,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
-	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
 	unsigned int max_rx_pktlen = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
@@ -1439,7 +1439,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
-	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and not enough mbuf space(%u) to contain "
 			"the maximum Rx packet length(%u) with head-room(%u)",
@@ -1485,7 +1485,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			config->mprq.stride_size_n : mprq_stride_size;
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_scatter_en =
-				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
+				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pktlen,
@@ -1500,7 +1500,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pktlen;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1561,9 +1561,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
 	/* Configure Rx timestamp. */
-	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
 	tmpl->rxq.timestamp_rx_flag = 0;
 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
 			&tmpl->rxq.timestamp_offset,
@@ -1572,11 +1572,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
@@ -1606,7 +1606,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.crc_present << 2);
 	/* Save port ID. */
 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
-		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = rx_seg[0].mp;
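
Note: the Rx queue hunks above consistently compute a queue's effective
offload set as conf->offloads OR'ed with the port-level rxmode.offloads.
On the application side that means a queue-only offload is passed via
rte_eth_rxconf (a sketch; port_id and mempool mp assumed):

	struct rte_eth_rxconf rxconf = {
		/* Queue-local request; port-level offloads are added on top. */
		.offloads = RTE_ETH_RX_OFFLOAD_SCATTER,
	};

	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), &rxconf, mp);
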
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 93b4f517bb3e..65d91bdf67e2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -16,10 +16,10 @@
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
-	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
-	 DEV_TX_OFFLOAD_UDP_CKSUM | \
-	 DEV_TX_OFFLOAD_TCP_CKSUM | \
-	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 
 /*
  * Compile time sanity check for vectorized functions.
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index df671379e46d..12aeba60348a 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -523,36 +523,36 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	unsigned int diff = 0, olx = 0, i, m;
 
 	MLX5_ASSERT(priv);
-	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
 		olx |= MLX5_TXOFF_CONFIG_MULTI;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			   DEV_TX_OFFLOAD_GRE_TNL_TSO |
-			   DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
 		/* We should support TCP Send Offload. */
 		olx |= MLX5_TXOFF_CONFIG_TSO;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-			   DEV_TX_OFFLOAD_UDP_TNL_TSO |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support Software Parser for Tunnels. */
 		olx |= MLX5_TXOFF_CONFIG_SWP;
 	}
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		/* We should support IP/TCP/UDP Checksums. */
 		olx |= MLX5_TXOFF_CONFIG_CSUM;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
 		/* We should support VLAN insertion. */
 		olx |= MLX5_TXOFF_CONFIG_VLAN;
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
 	    rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
 	    rte_mbuf_dynfield_lookup
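
Note: mlx5_select_tx_function() above folds many RTE_ETH_TX_OFFLOAD_*
bits into coarse MLX5_TXOFF_CONFIG_* classes. Naming the grouped masks
makes the intent clearer; a sketch using the same bits as the hunk:

	static const uint64_t any_tso =
		RTE_ETH_TX_OFFLOAD_TCP_TSO |
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO;

	if (tx_offloads & any_tso)
		olx |= MLX5_TXOFF_CONFIG_TSO;
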
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1f92250f5edd..02bb9307ae61 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -98,42 +98,42 @@ uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_VLAN_INSERT);
+	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
 	struct mlx5_dev_config *config = &priv->config;
 
 	if (config->hw_csum)
-		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_UDP_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
-		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (config->tx_pp)
-		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 	if (config->swp) {
 		if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->swp & MLX5_SW_PARSING_TSO_CAP)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
-			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso) {
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
-				offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
 			if (config->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
-				offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 		}
 	}
 	if (!config->mprq.enabled)
-		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -801,17 +801,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int inlen_mode; /* Minimal required Inline data. */
 	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
 	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
-	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					    DEV_TX_OFFLOAD_IP_TNL_TSO |
-					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+					    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+					    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	bool vlan_inline;
 	unsigned int temp;
 
 	txq_ctrl->txq.fast_free =
-		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
-		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
 		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
@@ -870,7 +870,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	 * tx_burst routine.
 	 */
 	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
-	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
 		      !config->hw_vlan_insert;
 	/*
 	 * If there are few Tx queues it is prioritized
@@ -978,19 +978,19 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 						    MLX5_MAX_TSO_HEADER);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
-	   ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
-	   ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+	   ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
 	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
 	   (config->swp  & MLX5_SW_PARSING_TSO_CAP))
 		txq_ctrl->txq.tunnel_en = 1;
-	txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
-				  DEV_TX_OFFLOAD_UDP_TNL_TSO) &
+	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
 				  txq_ctrl->txq.offloads) && (config->swp &
 				  MLX5_SW_PARSING_TSO_CAP)) |
-				((DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM &
+				((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
 				 txq_ctrl->txq.offloads) && (config->swp &
 				 MLX5_SW_PARSING_CSUM_CAP));
 }
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 60f97f2d2d1f..07792fc5d94f 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -142,9 +142,9 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-				       DEV_RX_OFFLOAD_VLAN_STRIP);
+				       RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 		if (!priv->config.hw_vlan_strip) {
 			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
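
Note: the RTE_ETH_VLAN_*_MASK bits tested in this callback are set by
the ethdev layer when rte_eth_dev_set_vlan_offload() detects a change.
A sketch of toggling stripping at runtime with the new names (port_id
assumed):

	int mask = rte_eth_dev_get_vlan_offload(port_id);

	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
		/* e.g. mlx5 rejects this when HW VLAN stripping is absent */
		return -1;
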
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 31c4d3276053..9a9069da7572 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -485,8 +485,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	if (config->hw_padding) {
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
index 2a0288087357..10fe6d828ccd 100644
--- a/drivers/net/mvneta/mvneta_ethdev.c
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -114,7 +114,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 	struct mvneta_priv *priv = dev->data->dev_private;
 	struct neta_ppio_params *ppio_params;
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
 		MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		if (dev->data->nb_rx_queues > 1)
@@ -126,7 +126,7 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ppio_params = &priv->ppio_params;
@@ -151,10 +151,10 @@ static int
 mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 		   struct rte_eth_dev_info *info)
 {
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G;
 
 	info->max_rx_queues = MRVL_NETA_RXQ_MAX;
 	info->max_tx_queues = MRVL_NETA_TXQ_MAX;
@@ -503,28 +503,28 @@ mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 
 	neta_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
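
Note: after the rename, link state read back through the ethdev API uses
the same constants the hunks above assign. A minimal sketch (port_id
assumed):

	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP)
		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
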
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
index 126a9a0c11b9..ccb87d518d83 100644
--- a/drivers/net/mvneta/mvneta_ethdev.h
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -54,14 +54,14 @@
 #define MRVL_NETA_MRU_TO_MTU(mru)	((mru) - MRVL_NETA_HDRS_LEN)
 
 /** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Tx offloads capabilities */
-#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				    DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MVNETA_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS)
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 				PKT_TX_TCP_CKSUM | \
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
index 9836bb071a82..62d8aa586dae 100644
--- a/drivers/net/mvneta/mvneta_rxtx.c
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -734,7 +734,7 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	rxq->priv = priv;
 	rxq->mp = mp;
 	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
-			     DEV_RX_OFFLOAD_IPV4_CKSUM;
+			     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	rxq->size = desc;
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index a6458d2ce9b5..d0746b0d1215 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -58,15 +58,15 @@
 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
 
 /** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
-			  DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			  RTE_ETH_RX_OFFLOAD_CHECKSUM)
 
 /** Port Tx offloads capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-				  DEV_TX_OFFLOAD_UDP_CKSUM  | \
-				  DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
-			  DEV_TX_OFFLOAD_MULTI_SEGS)
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
 			      PKT_TX_TCP_CKSUM | \
@@ -442,14 +442,14 @@ mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
 
 	if (rss_conf->rss_hf == 0) {
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
-	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_2_TUPLE;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 1;
-	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
 		priv->ppio_params.inqs_params.hash_type =
 			PP2_PPIO_HASH_T_5_TUPLE;
 		priv->rss_hf_tcp = 0;
@@ -483,8 +483,8 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
-	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
 			dev->data->dev_conf.rxmode.mq_mode);
 		return -EINVAL;
@@ -502,7 +502,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		priv->multiseg = 1;
 
 	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
@@ -524,7 +524,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 
 	if (dev->data->nb_rx_queues == 1 &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
 		priv->configured = 1;
@@ -623,7 +623,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_UP;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 		return 0;
 	}
 
@@ -644,7 +644,7 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -664,14 +664,14 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!priv->ppio) {
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 	ret = pp2_ppio_disable(priv->ppio);
 	if (ret)
 		return ret;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
@@ -893,7 +893,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->all_multicast == 1)
 		mrvl_allmulticast_enable(dev);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 		ret = mrvl_populate_vlan_table(dev, 1);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to populate VLAN table");
@@ -929,11 +929,11 @@ mrvl_dev_start(struct rte_eth_dev *dev)
 		priv->flow_ctrl = 0;
 	}
 
-	if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 		ret = mrvl_dev_set_link_up(dev);
 		if (ret) {
 			MRVL_LOG(ERR, "Failed to set link up");
-			dev->data->dev_link.link_status = ETH_LINK_DOWN;
+			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 			goto out;
 		}
 	}
@@ -1202,30 +1202,30 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 
 	switch (ethtool_cmd_speed(&edata)) {
 	case SPEED_10:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		break;
 	case SPEED_100:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 	case SPEED_1000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 	case SPEED_2500:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 	case SPEED_10000:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	default:
-		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	}
 
-	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
-							 ETH_LINK_HALF_DUPLEX;
-	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
-							   ETH_LINK_FIXED;
+	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+							 RTE_ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+							   RTE_ETH_LINK_FIXED;
 	pp2_ppio_get_link_state(priv->ppio, &link_up);
-	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -1709,11 +1709,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
 	struct mrvl_priv *priv = dev->data->dev_private;
 
-	info->speed_capa = ETH_LINK_SPEED_10M |
-			   ETH_LINK_SPEED_100M |
-			   ETH_LINK_SPEED_1G |
-			   ETH_LINK_SPEED_2_5G |
-			   ETH_LINK_SPEED_10G;
+	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+			   RTE_ETH_LINK_SPEED_100M |
+			   RTE_ETH_LINK_SPEED_1G |
+			   RTE_ETH_LINK_SPEED_2_5G |
+			   RTE_ETH_LINK_SPEED_10G;
 
 	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
 	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
@@ -1733,9 +1733,9 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 	info->tx_offload_capa = MRVL_TX_OFFLOADS;
 	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
 
-	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-				       ETH_RSS_NONFRAG_IPV4_TCP |
-				       ETH_RSS_NONFRAG_IPV4_UDP;
+	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	/* By default packets are dropped if no descriptors are available */
 	info->default_rxconf.rx_drop_en = 1;
@@ -1864,13 +1864,13 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
 		return -ENOTSUP;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			ret = mrvl_populate_vlan_table(dev, 1);
 		else
 			ret = mrvl_populate_vlan_table(dev, 0);
@@ -1879,7 +1879,7 @@ static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			return ret;
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
 		MRVL_LOG(ERR, "Extend VLAN not supported\n");
 		return -ENOTSUP;
 	}
@@ -2022,7 +2022,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -2182,7 +2182,7 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return ret;
 	}
 
-	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
 
 	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
 	if (ret) {
@@ -2191,10 +2191,10 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	if (en) {
-		if (fc_conf->mode == RTE_FC_NONE)
-			fc_conf->mode = RTE_FC_TX_PAUSE;
+		if (fc_conf->mode == RTE_ETH_FC_NONE)
+			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		else
-			fc_conf->mode = RTE_FC_FULL;
+			fc_conf->mode = RTE_ETH_FC_FULL;
 	}
 
 	return 0;
@@ -2240,19 +2240,19 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		rx_en = 1;
 		tx_en = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		rx_en = 0;
 		tx_en = 1;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		rx_en = 1;
 		tx_en = 0;
 		break;
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		rx_en = 0;
 		tx_en = 0;
 		break;
@@ -2329,11 +2329,11 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hash_type == PP2_PPIO_HASH_T_NONE)
 		rss_conf->rss_hf = 0;
 	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
-		rss_conf->rss_hf = ETH_RSS_IPV4;
+		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
-		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	return 0;
 }
@@ -3152,7 +3152,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 	eth_dev->dev_ops = &mrvl_ops;
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_eth_dev_probing_finish(eth_dev);
 	return 0;
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 9e2a40597349..9c4ae80e7e16 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -40,16 +40,16 @@
 #include "hn_nvs.h"
 #include "ndis.h"
 
-#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
-			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
-			    DEV_TX_OFFLOAD_TCP_TSO    | \
-			    DEV_TX_OFFLOAD_MULTI_SEGS | \
-			    DEV_TX_OFFLOAD_VLAN_INSERT)
+#define HN_TX_OFFLOAD_CAPS (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			    RTE_ETH_TX_OFFLOAD_TCP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
+			    RTE_ETH_TX_OFFLOAD_TCP_TSO    | \
+			    RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+			    RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 
-#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
-			    DEV_RX_OFFLOAD_VLAN_STRIP | \
-			    DEV_RX_OFFLOAD_RSS_HASH)
+#define HN_RX_OFFLOAD_CAPS (RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+			    RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NETVSC_ARG_LATENCY "latency"
 #define NETVSC_ARG_RXBREAK "rx_copybreak"
@@ -238,21 +238,21 @@ hn_dev_link_update(struct rte_eth_dev *dev,
 	hn_rndis_get_linkspeed(hv);
 
 	link = (struct rte_eth_link) {
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_autoneg = ETH_LINK_SPEED_FIXED,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 		.link_speed = hv->link_speed / 10000,
 	};
 
 	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	else
-		link.link_status = ETH_LINK_DOWN;
+		link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (old.link_status == link.link_status)
 		return 0;
 
 	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
-		     (link.link_status == ETH_LINK_UP) ? "up" : "down");
+		     (link.link_status == RTE_ETH_LINK_UP) ? "up" : "down");
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -263,14 +263,14 @@ static int hn_dev_info_get(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	int rc;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen  = HN_MAX_XFER_LEN;
 	dev_info->max_mac_addrs  = 1;
 
 	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
 	dev_info->flow_type_rss_offloads = hv->rss_offloads;
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 
 	dev_info->max_rx_queues = hv->max_queues;
 	dev_info->max_tx_queues = hv->max_queues;
@@ -306,8 +306,8 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -346,8 +346,8 @@ static int hn_rss_reta_query(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
-		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
-		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint64_t mask = (uint64_t)1 << shift;
 
 		if (reta_conf[idx].mask & mask)
@@ -362,17 +362,17 @@ static void hn_rss_hash_init(struct hn_data *hv,
 	/* Convert from DPDK RSS hash flags to NDIS hash flags */
 	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
 
-	if (rss_conf->rss_hf & ETH_RSS_IPV4)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
 		hv->rss_hash |= NDIS_HASH_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
 		hv->rss_hash |=  NDIS_HASH_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX)
 		hv->rss_hash |=  NDIS_HASH_IPV6_EX;
-	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
-	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+	if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
 
 	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
@@ -427,22 +427,22 @@ static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_conf->rss_hf = 0;
 	if (hv->rss_hash & NDIS_HASH_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_IPV4;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_IPV6;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 
 	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_EX;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
-		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
 
 	return 0;
 }
@@ -686,8 +686,8 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
 	if (unsupported) {
@@ -705,7 +705,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	hv->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	err = hn_rndis_conf_offload(hv, txmode->offloads,
 				    rxmode->offloads);
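
Note: hn_rss_hash_init() and hn_rss_hash_conf_get() above are hand-written
inverses of each other. With the longer RTE_ETH_RSS_* names, a table-driven
form would keep the two sides in sync; an illustrative sketch only (an
assumption about how the code could be restructured, not part of this
patch; NDIS_HASH_* come from the driver's ndis.h):

	#include <rte_ethdev.h>
	#include "ndis.h"	/* NDIS_HASH_* bits */

	static const struct {
		uint64_t rss_hf;	/* DPDK RSS hash flag */
		uint32_t ndis;		/* matching NDIS hash bit */
	} hn_rss_map[] = {
		{ RTE_ETH_RSS_IPV4,		NDIS_HASH_IPV4 },
		{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,	NDIS_HASH_TCP_IPV4 },
		{ RTE_ETH_RSS_IPV6,		NDIS_HASH_IPV6 },
		{ RTE_ETH_RSS_IPV6_EX,		NDIS_HASH_IPV6_EX },
		{ RTE_ETH_RSS_NONFRAG_IPV6_TCP,	NDIS_HASH_TCP_IPV6 },
		{ RTE_ETH_RSS_IPV6_TCP_EX,	NDIS_HASH_TCP_IPV6_EX },
	};

Iterating this table in both directions would replace the twelve
hand-maintained if-blocks above.
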
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index 62ba39636cd8..1b63b27e0c3e 100644
--- a/drivers/net/netvsc/hn_rndis.c
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -710,15 +710,15 @@ hn_rndis_query_rsscaps(struct hn_data *hv,
 
 	hv->rss_offloads = 0;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
-		hv->rss_offloads |= ETH_RSS_IPV4
-			| ETH_RSS_NONFRAG_IPV4_TCP
-			| ETH_RSS_NONFRAG_IPV4_UDP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV4
+			| RTE_ETH_RSS_NONFRAG_IPV4_TCP
+			| RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
-		hv->rss_offloads |= ETH_RSS_IPV6
-			| ETH_RSS_NONFRAG_IPV6_TCP;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6
+			| RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
-		hv->rss_offloads |= ETH_RSS_IPV6_EX
-			| ETH_RSS_IPV6_TCP_EX;
+		hv->rss_offloads |= RTE_ETH_RSS_IPV6_EX
+			| RTE_ETH_RSS_IPV6_TCP_EX;
 
 	/* Commit! */
 	*rxr_cnt0 = rxr_cnt;
@@ -800,7 +800,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 		params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -812,7 +812,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
 		    == NDIS_RXCSUM_CAP_TCP4)
 			params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
@@ -826,7 +826,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
 			params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
@@ -839,7 +839,7 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
 			params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
@@ -851,21 +851,21 @@ int hn_rndis_conf_offload(struct hn_data *hv,
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
 		if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
 		    == NDIS_TXCSUM_CAP_IP4)
 			params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
 		else
 			goto unsupported;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
 			params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
 		else
 			goto unsupported;
 	}
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
 			params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
 		else
@@ -907,41 +907,41 @@ int hn_rndis_get_offload(struct hn_data *hv,
 		return error;
 	}
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
 	    == HN_NDIS_TXCSUM_CAP_IP4)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
 	    == HN_NDIS_TXCSUM_CAP_TCP4 &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
 	    == HN_NDIS_TXCSUM_CAP_TCP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
 
 	if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
 	    (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
 	    == HN_NDIS_LSOV2_CAP_IP6)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				    DEV_RX_OFFLOAD_RSS_HASH;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
 	    (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 
 	return 0;
 }
diff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c
index 99d93ebf4667..3c39937816a4 100644
--- a/drivers/net/nfb/nfb_ethdev.c
+++ b/drivers/net/nfb/nfb_ethdev.c
@@ -200,7 +200,7 @@ nfb_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -268,26 +268,26 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 
 	status.speed = MAC_SPEED_UNKNOWN;
 
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_status = ETH_LINK_DOWN;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_SPEED_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	if (internals->rxmac[0] != NULL) {
 		nc_rxmac_read_status(internals->rxmac[0], &status);
 
 		switch (status.speed) {
 		case MAC_SPEED_10G:
-			link.link_speed = ETH_SPEED_NUM_10G;
+			link.link_speed = RTE_ETH_SPEED_NUM_10G;
 			break;
 		case MAC_SPEED_40G:
-			link.link_speed = ETH_SPEED_NUM_40G;
+			link.link_speed = RTE_ETH_SPEED_NUM_40G;
 			break;
 		case MAC_SPEED_100G:
-			link.link_speed = ETH_SPEED_NUM_100G;
+			link.link_speed = RTE_ETH_SPEED_NUM_100G;
 			break;
 		default:
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			break;
 		}
 	}
@@ -296,7 +296,7 @@ nfb_eth_link_update(struct rte_eth_dev *dev,
 		nc_rxmac_read_status(internals->rxmac[i], &status);
 
 		if (status.enabled && status.link_up) {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			break;
 		}
 	}
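
Note: the link_update hunks in this patch all fill the same four fields of
struct rte_eth_link. An illustrative sketch of the "safe" defaults the
drivers above start from before probing the MAC (link_autoneg takes
RTE_ETH_LINK_FIXED or RTE_ETH_LINK_AUTONEG, while the RTE_ETH_LINK_SPEED_*
bits belong in link_speeds and speed_capa):

	#include <ethdev_driver.h>	/* driver-side helpers */

	struct rte_eth_link link = {
		.link_speed   = RTE_ETH_SPEED_NUM_NONE,	/* not known yet */
		.link_status  = RTE_ETH_LINK_DOWN,
		.link_duplex  = RTE_ETH_LINK_FULL_DUPLEX,
		.link_autoneg = RTE_ETH_LINK_FIXED,	/* no autonegotiation */
	};

	/* ... probe the hardware, then publish atomically: */
	rte_eth_linkstatus_set(dev, &link);
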
diff --git a/drivers/net/nfb/nfb_rx.c b/drivers/net/nfb/nfb_rx.c
index 3ebb332ae46c..f76e2ba64621 100644
--- a/drivers/net/nfb/nfb_rx.c
+++ b/drivers/net/nfb/nfb_rx.c
@@ -42,7 +42,7 @@ nfb_check_timestamp(struct rte_devargs *devargs)
 	}
 	/* Timestamps are enabled when there is
 	 * key-value pair: enable_timestamp=1
-	 * TODO: timestamp should be enabled with DEV_RX_OFFLOAD_TIMESTAMP
+	 * TODO: timestamp should be enabled with RTE_ETH_RX_OFFLOAD_TIMESTAMP
 	 */
 	if (rte_kvargs_process(kvlist, TIMESTAMP_ARG,
 		timestamp_check_handler, NULL) < 0) {
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 0003fd54dde5..3ea697c54462 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -160,8 +160,8 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
@@ -170,7 +170,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX mode */
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
 	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
 		PMD_INIT_LOG(INFO, "RSS not supported");
 		return -EINVAL;
@@ -359,19 +359,19 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 	}
 
 	hw->mtu = dev->data->mtu;
 
-	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
 	/* L2 broadcast */
@@ -383,13 +383,13 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* TX checksum offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
-	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -397,7 +397,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	}
 
 	/* RX gather */
-	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
 	return ctrl;
@@ -485,14 +485,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	int ret;
 
 	static const uint32_t ls_to_ethtool[] = {
-		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
-		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
-		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
-		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
-		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
-		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
-		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
+		[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
+		[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
+		[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
+		[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
+		[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
 	};
 
 	PMD_DRV_LOG(DEBUG, "Link update");
@@ -504,15 +504,15 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 			 NFP_NET_CFG_STS_LINK_RATE_MASK;
 
 	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	else
 		link.link_speed = ls_to_ethtool[nn_link_status];
 
@@ -701,26 +701,26 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
-		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_UDP_CKSUM |
-					     DEV_RX_OFFLOAD_TCP_CKSUM;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-					     DEV_TX_OFFLOAD_UDP_CKSUM |
-					     DEV_TX_OFFLOAD_TCP_CKSUM;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -757,22 +757,22 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	};
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-		dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
-						   ETH_RSS_NONFRAG_IPV4_TCP |
-						   ETH_RSS_NONFRAG_IPV4_UDP |
-						   ETH_RSS_IPV6 |
-						   ETH_RSS_NONFRAG_IPV6_TCP |
-						   ETH_RSS_NONFRAG_IPV6_UDP;
+		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+						   RTE_ETH_RSS_IPV6 |
+						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+						   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 	}
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -843,7 +843,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 	if (link.link_status)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 			    dev->data->port_id, link.link_speed,
-			    link.link_duplex == ETH_LINK_FULL_DUPLEX
+			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 			    ? "full-duplex" : "half-duplex");
 	else
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
@@ -973,12 +973,12 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	new_ctrl = 0;
 
 	/* Enable vlan strip if it is not configured yet */
-	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
 
 	/* Disable vlan strip just if it is configured */
-	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
@@ -1018,8 +1018,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1099,8 +1099,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 	 */
 	for (i = 0; i < reta_size; i += 4) {
 		/* Handling 4 RSS entries per loop */
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
 
 		if (!mask)
@@ -1138,22 +1138,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
 	rss_hf = rss_conf->rss_hf;
 
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1223,22 +1223,22 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
 	/* Propagate current RSS hash functions to caller */
 	rss_conf->rss_hf = rss_hf;
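
Note: the RETA hunks here and in the other drivers all share one indexing
scheme: the redirection table is carried as an array of struct
rte_eth_rss_reta_entry64, each element covering RTE_ETH_RETA_GROUP_SIZE
(64) entries plus a per-entry validity mask. An illustrative accessor (not
part of this patch; the helper name is made up):

	#include <rte_ethdev.h>

	/* Return RETA entry i, or NULL if its mask bit is not set. */
	static uint16_t *
	reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t i)
	{
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		if (((reta_conf[idx].mask >> shift) & 0x1) == 0)
			return NULL;
		return &reta_conf[idx].reta[shift];
	}
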
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 1169ea77a8c7..e08e594b04fe 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -141,7 +141,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index 62cb3536e0c9..817fe64dbceb 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -103,7 +103,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
 		nfp_net_rss_config_default(dev);
 		update |= NFP_NET_CFG_UPDATE_RSS;
 		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3b5c6615adfa..fc76b84b5b66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -409,7 +409,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	dev->data->dev_link.link_status = link_up;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 		negotiate = true;
 
 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
@@ -418,11 +418,11 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 
 	allowed_speeds = 0;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_1G;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_100M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
-		allowed_speeds |= ETH_LINK_SPEED_10M;
+		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
 
 	if (*link_speeds & ~allowed_speeds) {
 		PMD_INIT_LOG(ERR, "Invalid link setting");
@@ -430,14 +430,14 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = hw->mac.default_speeds;
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= NGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= NGBE_LINK_SPEED_100M_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_10M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
 			speed |= NGBE_LINK_SPEED_10M_FULL;
 	}
 
@@ -653,8 +653,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_10M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -682,11 +682,11 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			~ETH_LINK_SPEED_AUTONEG);
+			~RTE_ETH_LINK_SPEED_AUTONEG);
 
 	hw->mac.get_link_status = true;
 
@@ -699,8 +699,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_NONE;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -708,27 +708,27 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 		return rte_eth_linkstatus_set(dev, &link);
 
 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case NGBE_LINK_SPEED_UNKNOWN:
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		break;
 
 	case NGBE_LINK_SPEED_10M_FULL:
-		link.link_speed = ETH_SPEED_NUM_10M;
+		link.link_speed = RTE_ETH_SPEED_NUM_10M;
 		lan_speed = 0;
 		break;
 
 	case NGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		lan_speed = 1;
 		break;
 
 	case NGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		lan_speed = 2;
 		break;
 	}
@@ -912,11 +912,11 @@ ngbe_dev_link_status_print(struct rte_eth_dev *dev)
 
 	rte_eth_linkstatus_get(dev, &link);
 
-	if (link.link_status == ETH_LINK_UP) {
+	if (link.link_status == RTE_ETH_LINK_UP) {
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -956,7 +956,7 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
 		ngbe_dev_link_update(dev, 0);
 
 		/* likely to up */
-		if (link.link_status != ETH_LINK_UP)
+		if (link.link_status != RTE_ETH_LINK_UP)
 			/* handle it 1 sec later, wait it being stable */
 			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
 		/* likely to down */
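
Note: the link_speeds checks above rely on two special encodings of the
dev_conf.link_speeds bitmap: RTE_ETH_LINK_SPEED_AUTONEG is 0, so comparing
with == tests for an empty bitmap ("autonegotiate everything"), and
RTE_ETH_LINK_SPEED_FIXED is bit 0. A hedged sketch of the validation
pattern, assuming allowed_speeds was built from the device capabilities as
in ngbe_dev_start() above:

	uint32_t link_speeds = dev->data->dev_conf.link_speeds;

	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		/* empty bitmap: negotiate the device's default set */
	} else if (link_speeds & ~(allowed_speeds | RTE_ETH_LINK_SPEED_FIXED)) {
		return -EINVAL;	/* a speed this port cannot provide */
	}
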
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 25b9e5b1ce1b..ca03469d0e6d 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -61,16 +61,16 @@ struct pmd_internals {
 	rte_spinlock_t rss_lock;
 
 	uint16_t reta_size;
-	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
-			RTE_RETA_GROUP_SIZE];
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+			RTE_ETH_RETA_GROUP_SIZE];
 
 	uint8_t rss_key[40];                /**< 40-byte hash key. */
 };
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
@@ -189,7 +189,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return -EINVAL;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -199,7 +199,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	if (dev == NULL)
 		return 0;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
@@ -391,9 +391,9 @@ eth_rss_reta_update(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
 		internal->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -416,8 +416,8 @@ eth_rss_reta_query(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&internal->rss_lock);
 
 	/* Copy RETA table */
-	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
 	}
@@ -548,8 +548,8 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	internals->port_id = eth_dev->data->port_id;
 	rte_eth_random_addr(internals->eth_addr.addr_bytes);
 
-	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
-	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+	internals->flow_type_rss_offloads =  RTE_ETH_RSS_PROTO_MASK;
+	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;
 
 	rte_memcpy(internals->rss_key, default_rss_key, 40);
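
Note: with RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE = 128 / 64,
the null PMD's reta_conf[] above has two elements, and the derived
reta_size of RTE_DIM(reta_conf) * RTE_ETH_RETA_GROUP_SIZE comes back to
exactly 128 entries.
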
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index f578123ed00b..5b8cbec67b5d 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -158,7 +158,7 @@ octeontx_link_status_print(struct rte_eth_dev *eth_dev,
 		octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
 			  (eth_dev->data->port_id),
 			  link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		octeontx_log_info("Port %d: Link Down",
@@ -171,38 +171,38 @@ octeontx_link_status_update(struct octeontx_nic *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	switch (nic->speed) {
 	case OCTEONTX_LINK_SPEED_SGMII:
-		link->link_speed = ETH_SPEED_NUM_1G;
+		link->link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_XAUI:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RXAUI:
 	case OCTEONTX_LINK_SPEED_10G_R:
-		link->link_speed = ETH_SPEED_NUM_10G;
+		link->link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	case OCTEONTX_LINK_SPEED_QSGMII:
-		link->link_speed = ETH_SPEED_NUM_5G;
+		link->link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 	case OCTEONTX_LINK_SPEED_40G_R:
-		link->link_speed = ETH_SPEED_NUM_40G;
+		link->link_speed = RTE_ETH_SPEED_NUM_40G;
 		break;
 
 	case OCTEONTX_LINK_SPEED_RESERVE1:
 	case OCTEONTX_LINK_SPEED_RESERVE2:
 	default:
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
 		octeontx_log_err("incorrect link speed %d", nic->speed);
 		break;
 	}
 
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -355,20 +355,20 @@ octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= OCCTX_TX_MULTI_SEG_F;
 
 	return flags;
@@ -380,21 +380,21 @@ octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
 	uint16_t flags = 0;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= OCCTX_RX_OFFLOAD_CSUM_F;
 
-	if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		flags |= OCCTX_RX_MULTI_SEG_F;
 		eth_dev->data->scattered_rx = 1;
 		/* If scatter mode is enabled, TX should also be in multi
 		 * seg mode, else memory leak will occur
 		 */
-		nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	return flags;
@@ -423,18 +423,18 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+		txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		octeontx_log_err("setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -530,13 +530,13 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		octeontx_log_err("Scatter mode is disabled");
 		return -EINVAL;
 	}
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -571,7 +571,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
 
 	/* Setup scatter mode if needed by jumbo */
 	if (data->mtu > buffsz) {
-		nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+		nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
 		nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
 	}
@@ -843,10 +843,10 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_40G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_40G;
 
 	/* Min/Max MTU supported */
 	dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
@@ -1356,7 +1356,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	nic->ev_ports = 1;
 	nic->print_flag = -1;
 
-	data->dev_link.link_status = ETH_LINK_DOWN;
+	data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	data->dev_started = 0;
 	data->promiscuous = 0;
 	data->all_multicast = 0;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 3a02824e3948..c493fa7a03ed 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -55,23 +55,23 @@
 #define OCCTX_MAX_MTU		(OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
 
 #define OCTEONTX_RX_OFFLOADS		(				   \
-					 DEV_RX_OFFLOAD_CHECKSUM	 | \
-					 DEV_RX_OFFLOAD_SCTP_CKSUM       | \
-					 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_RX_OFFLOAD_SCATTER	         | \
-					 DEV_RX_OFFLOAD_SCATTER		 | \
-					 DEV_RX_OFFLOAD_VLAN_FILTER)
+					 RTE_ETH_RX_OFFLOAD_CHECKSUM         | \
+					 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_RX_OFFLOAD_SCATTER          | \
+					 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 
 #define OCTEONTX_TX_OFFLOADS		(				   \
-					 DEV_TX_OFFLOAD_MBUF_FAST_FREE	 | \
-					 DEV_TX_OFFLOAD_MT_LOCKFREE	 | \
-					 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-					 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_IPV4_CKSUM	 | \
-					 DEV_TX_OFFLOAD_TCP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_UDP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_SCTP_CKSUM	 | \
-					 DEV_TX_OFFLOAD_MULTI_SEGS)
+					 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+					 RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+					 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  | \
+					 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+					 RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+					 RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+					 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       | \
+					 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 static inline struct octeontx_nic *
 octeontx_pmd_priv(struct rte_eth_dev *dev)
diff --git a/drivers/net/octeontx/octeontx_ethdev_ops.c b/drivers/net/octeontx/octeontx_ethdev_ops.c
index dbe13ce3826b..6ec2b71b0672 100644
--- a/drivers/net/octeontx/octeontx_ethdev_ops.c
+++ b/drivers/net/octeontx/octeontx_ethdev_ops.c
@@ -43,20 +43,20 @@ octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			rc = octeontx_vlan_hw_filter(nic, true);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
 		} else {
 			rc = octeontx_vlan_hw_filter(nic, false);
 			if (rc)
 				goto done;
 
-			nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
 		}
 	}
@@ -139,7 +139,7 @@ octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
 
 	TAILQ_INIT(&nic->vlan_info.fltr_tbl);
 
-	rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	rc = octeontx_dev_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 	if (rc)
 		octeontx_log_err("Failed to set vlan offload rc=%d", rc);
 
@@ -219,13 +219,13 @@ octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
 		return rc;
 
 	if (conf.rx_pause && conf.tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (conf.rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (conf.tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	/* low_water & high_water values are in Bytes */
 	fc_conf->low_water = conf.low_water;
@@ -272,10 +272,10 @@ octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-			(fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+			(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	conf.high_water = fc_conf->high_water;
 	conf.low_water = fc_conf->low_water;
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index f491e20e95c1..060d267f5de5 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -21,7 +21,7 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 
 	if (otx2_dev_is_vf(dev) ||
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
-		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+		capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 	return capa;
 }
@@ -33,10 +33,10 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 
 	/* TSO not supported for earlier chip revisions */
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
-		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
 	return capa;
 }
 
@@ -66,8 +66,8 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
 	req->npa_func = otx2_npa_pf_func_get();
 	req->sso_func = otx2_sso_pf_func_get();
 	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM)) {
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
 		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
 		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
 	}
@@ -373,7 +373,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 
 	aq->rq.sso_ena = 0;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		aq->rq.ipsech_ena = 1;
 
 	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
@@ -665,7 +665,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 	 * These are needed in deriving raw clock value from tsc counter.
 	 * read_clock eth op returns raw clock value.
 	 */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev)) {
 		rc = otx2_nix_raw_clock_tsc_conv(dev);
 		if (rc) {
@@ -692,7 +692,7 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 	 * Maximum three segments can be supported with W8, Choose
 	 * NIX_MAXSQESZ_W16 for multi segment offload.
 	 */
-	if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		return NIX_MAXSQESZ_W16;
 	else
 		return NIX_MAXSQESZ_W8;
@@ -707,29 +707,29 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	uint16_t flags = 0;
 
-	if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-			(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+			(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		flags |= NIX_RX_OFFLOAD_RSS_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-			 DEV_RX_OFFLOAD_UDP_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			 RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		flags |= NIX_RX_MULTI_SEG_F;
 
-	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_QINQ_STRIP))
+	if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
 		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 		flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
 	if (!dev->ptype_disable)
@@ -768,43 +768,43 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+	if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+	    conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
-	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+	if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+	    conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+	if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		flags |= NIX_TX_MULTI_SEG_F;
 
 	/* Enable Inner checksum for TSO */
-	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+	if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
-	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
+	if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-	if (conf & DEV_TX_OFFLOAD_SECURITY)
+	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
 	return flags;
@@ -914,8 +914,8 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
 	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
 	if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
-		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 		/* Setting up the rx[tx]_offload_flags due to change
 		 * in rx[tx]_offloads.
@@ -1848,21 +1848,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 		goto fail_configure;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
 		goto fail_configure;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
 		goto fail_configure;
 	}
 
 	if (otx2_dev_is_Ax(dev) &&
-	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+	    ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
 		otx2_err("Outer IP and SCTP checksum unsupported");
 		goto fail_configure;
 	}
@@ -2235,7 +2235,7 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
 	 * enabled in PF owning this VF
 	 */
 	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 	    otx2_ethdev_is_ptp_en(dev))
 		otx2_nix_timesync_enable(eth_dev);
 	else
@@ -2563,8 +2563,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	rc = otx2_eth_sec_ctx_create(eth_dev);
 	if (rc)
 		goto free_mac_addrs;
-	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 
 	/* Initialize rte-flow */
 	rc = otx2_flow_init(dev);
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 4557a0ee1945..a5282c6c1231 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -117,43 +117,43 @@
 #define CQ_TIMER_THRESH_DEFAULT	0xAULL /* ~1usec i.e (0xA * 100nsec) */
 #define CQ_TIMER_THRESH_MAX     255
 
-#define NIX_RSS_L3_L4_SRC_DST  (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
-				| ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define NIX_RSS_L3_L4_SRC_DST  (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY \
+				| RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
 
-#define NIX_RSS_OFFLOAD		(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
-				 ETH_RSS_TCP | ETH_RSS_SCTP | \
-				 ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
-				 NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | \
-				 ETH_RSS_C_VLAN)
+#define NIX_RSS_OFFLOAD		(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |\
+				 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | \
+				 RTE_ETH_RSS_TUNNEL | RTE_ETH_RSS_L2_PAYLOAD | \
+				 NIX_RSS_L3_L4_SRC_DST | RTE_ETH_RSS_LEVEL_MASK | \
+				 RTE_ETH_RSS_C_VLAN)
 
 #define NIX_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE	| \
-	DEV_TX_OFFLOAD_MT_LOCKFREE	| \
-	DEV_TX_OFFLOAD_VLAN_INSERT	| \
-	DEV_TX_OFFLOAD_QINQ_INSERT	| \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
-	DEV_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_CKSUM	| \
-	DEV_TX_OFFLOAD_UDP_CKSUM	| \
-	DEV_TX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_TX_OFFLOAD_TCP_TSO		| \
-	DEV_TX_OFFLOAD_VXLAN_TNL_TSO    | \
-	DEV_TX_OFFLOAD_GENEVE_TNL_TSO   | \
-	DEV_TX_OFFLOAD_GRE_TNL_TSO	| \
-	DEV_TX_OFFLOAD_MULTI_SEGS	| \
-	DEV_TX_OFFLOAD_IPV4_CKSUM)
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE	| \
+	RTE_ETH_TX_OFFLOAD_MT_LOCKFREE		| \
+	RTE_ETH_TX_OFFLOAD_VLAN_INSERT		| \
+	RTE_ETH_TX_OFFLOAD_QINQ_INSERT		| \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM		| \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM		| \
+	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		| \
+	RTE_ETH_TX_OFFLOAD_TCP_TSO		| \
+	RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO	| \
+	RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO	| \
+	RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO		| \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS		| \
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 
 #define NIX_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM		| \
-	DEV_RX_OFFLOAD_SCTP_CKSUM	| \
-	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_RX_OFFLOAD_SCATTER		| \
-	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
-	DEV_RX_OFFLOAD_VLAN_STRIP	| \
-	DEV_RX_OFFLOAD_VLAN_FILTER	| \
-	DEV_RX_OFFLOAD_QINQ_STRIP	| \
-	DEV_RX_OFFLOAD_TIMESTAMP	| \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM		| \
+	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM		| \
+	RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_SCATTER		| \
+	RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM	| \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP		| \
+	RTE_ETH_RX_OFFLOAD_VLAN_FILTER		| \
+	RTE_ETH_RX_OFFLOAD_QINQ_STRIP		| \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP		| \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NIX_DEFAULT_RSS_CTX_GROUP  0
 #define NIX_DEFAULT_RSS_MCAM_IDX  -1
diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c b/drivers/net/octeontx2/otx2_ethdev_devargs.c
index 83f905315b38..60bf6c3f5f05 100644
--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -49,12 +49,12 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
 
 	val = atoi(value);
 
-	if (val <= ETH_RSS_RETA_SIZE_64)
-		val = ETH_RSS_RETA_SIZE_64;
-	else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
-		val = ETH_RSS_RETA_SIZE_128;
-	else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
-		val = ETH_RSS_RETA_SIZE_256;
+	if (val <= RTE_ETH_RSS_RETA_SIZE_64)
+		val = RTE_ETH_RSS_RETA_SIZE_64;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
+		val = RTE_ETH_RSS_RETA_SIZE_128;
+	else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
+		val = RTE_ETH_RSS_RETA_SIZE_256;
 	else
 		val = NIX_RSS_RETA_SIZE;
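
Note: parse_reta_size() rounds the requested table size up to the nearest
supported RTE_ETH_RSS_RETA_SIZE_* bucket (64, 128 or 256) and falls back
to the NIX default for anything larger. For instance (illustrative
devargs; the PCI address is made up):

	-a 0002:02:00.0,reta_size=100	# rounded up to RTE_ETH_RSS_RETA_SIZE_128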
 
diff --git a/drivers/net/octeontx2/otx2_ethdev_ops.c b/drivers/net/octeontx2/otx2_ethdev_ops.c
index 22a8af5cba45..d5caaa326a5a 100644
--- a/drivers/net/octeontx2/otx2_ethdev_ops.c
+++ b/drivers/net/octeontx2/otx2_ethdev_ops.c
@@ -26,11 +26,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	 * when this feature has not been enabled before.
 	 */
 	if (data->dev_started && frame_size > buffsz &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER))
 		return -EINVAL;
 
 	/* Check <seg size> * <max_seg>  >= max_frame */
-	if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)	&&
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	&&
 	    (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
 		return -EINVAL;
 
@@ -568,17 +568,17 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	};
 
 	/* Auto negotiation disabled */
-	devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
-		devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+		devinfo->speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G;
 
 		/* 50G and 100G to be supported for board version C0
 		 * and above.
 		 */
 		if (!otx2_dev_is_Ax(dev))
-			devinfo->speed_capa |= ETH_LINK_SPEED_50G |
-					       ETH_LINK_SPEED_100G;
+			devinfo->speed_capa |= RTE_ETH_LINK_SPEED_50G |
+					       RTE_ETH_LINK_SPEED_100G;
 	}
 
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
diff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c
index 7bd1ed6da043..4d40184de46d 100644
--- a/drivers/net/octeontx2/otx2_ethdev_sec.c
+++ b/drivers/net/octeontx2/otx2_ethdev_sec.c
@@ -869,8 +869,8 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
 	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
 			 !RTE_IS_POWER_OF_2(sa_width));
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return 0;
 
 	if (rte_security_dynfield_register() < 0)
@@ -912,8 +912,8 @@ otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
 	uint16_t port = eth_dev->data->port_id;
 	char name[RTE_MEMZONE_NAMESIZE];
 
-	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
-	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	lookup_mem_sa_tbl_clear(eth_dev);
diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c
index 6df0732189eb..1d0fe4e950d4 100644
--- a/drivers/net/octeontx2/otx2_flow.c
+++ b/drivers/net/octeontx2/otx2_flow.c
@@ -625,7 +625,7 @@ otx2_flow_create(struct rte_eth_dev *dev,
 		goto err_exit;
 	}
 
-	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		rc = flow_update_sec_tt(dev, actions);
 		if (rc != 0) {
 			rte_flow_error_set(error, EIO,
diff --git a/drivers/net/octeontx2/otx2_flow_ctrl.c b/drivers/net/octeontx2/otx2_flow_ctrl.c
index 76bf48100183..071740de86a7 100644
--- a/drivers/net/octeontx2/otx2_flow_ctrl.c
+++ b/drivers/net/octeontx2/otx2_flow_ctrl.c
@@ -54,7 +54,7 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 	int rc;
 
 	if (otx2_dev_is_lbk(dev)) {
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		return 0;
 	}
 
@@ -66,13 +66,13 @@ otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		goto done;
 
 	if (rsp->rx_pause && rsp->tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rsp->rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (rsp->tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 done:
 	return rc;
@@ -159,10 +159,10 @@ otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	if (fc_conf->mode == fc->mode)
 		return 0;
 
-	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-		    (fc_conf->mode == RTE_FC_TX_PAUSE);
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
@@ -212,11 +212,11 @@ otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
 	if (otx2_dev_is_Ax(dev) &&
 	    (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
-	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+	    (fc_conf.mode == RTE_ETH_FC_FULL || fc_conf.mode == RTE_ETH_FC_RX_PAUSE)) {
 		fc_conf.mode =
-				(fc_conf.mode == RTE_FC_FULL ||
-				fc_conf.mode == RTE_FC_TX_PAUSE) ?
-				RTE_FC_TX_PAUSE : RTE_FC_NONE;
+				(fc_conf.mode == RTE_ETH_FC_FULL ||
+				fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ?
+				RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
 	}
 
 	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
@@ -234,7 +234,7 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		return 0;
 
 	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
-	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+	/* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
 	 * by AF driver, update those info in PMD structure.
 	 */
 	rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
@@ -242,10 +242,10 @@ otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
 		goto exit;
 
 	fc->mode = fc_conf.mode;
-	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_RX_PAUSE);
-	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-			(fc_conf.mode == RTE_FC_TX_PAUSE);
+	fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+	fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+			(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
 
 exit:
 	return rc;
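[Note: the same mode <-> pause-bit translation appears three times in this
file; the enum values are unchanged by this patch, only the names. A
condensed sketch of the mapping, with an illustrative helper name:]

	static enum rte_eth_fc_mode
	fc_mode_from_pause(int rx_pause, int tx_pause)
	{
		if (rx_pause && tx_pause)
			return RTE_ETH_FC_FULL;
		if (rx_pause)
			return RTE_ETH_FC_RX_PAUSE;
		if (tx_pause)
			return RTE_ETH_FC_TX_PAUSE;
		return RTE_ETH_FC_NONE;
	}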
diff --git a/drivers/net/octeontx2/otx2_flow_parse.c b/drivers/net/octeontx2/otx2_flow_parse.c
index 79b92fda8a4a..91267bbb8182 100644
--- a/drivers/net/octeontx2/otx2_flow_parse.c
+++ b/drivers/net/octeontx2/otx2_flow_parse.c
@@ -852,7 +852,7 @@ parse_rss_action(struct rte_eth_dev *dev,
 					  attr, "No support of RSS in egress");
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  act, "multi-queue mode is disabled");
@@ -1186,7 +1186,7 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev,
 		 *FLOW_KEY_ALG index. So, till we update the action with
 		 *flow_key_alg index, set the action to drop.
 		 */
-		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 			flow->npc_action = NIX_RX_ACTIONOP_DROP;
 		else
 			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
diff --git a/drivers/net/octeontx2/otx2_link.c b/drivers/net/octeontx2/otx2_link.c
index 81dd6243b977..8f5d0eed92b6 100644
--- a/drivers/net/octeontx2/otx2_link.c
+++ b/drivers/net/octeontx2/otx2_link.c
@@ -41,7 +41,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
 		otx2_info("Port %d: Link Up - speed %u Mbps - %s",
 			  (int)(eth_dev->data->port_id),
 			  (uint32_t)link->link_speed,
-			  link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+			  link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			  "full-duplex" : "half-duplex");
 	else
 		otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
@@ -92,7 +92,7 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 
 	eth_link.link_status = link->link_up;
 	eth_link.link_speed = link->speed;
-	eth_link.link_autoneg = ETH_LINK_AUTONEG;
+	eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 	eth_link.link_duplex = link->full_duplex;
 
 	otx2_dev->speed = link->speed;
@@ -111,10 +111,10 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 static int
 lbk_link_update(struct rte_eth_link *link)
 {
-	link->link_status = ETH_LINK_UP;
-	link->link_speed = ETH_SPEED_NUM_100G;
-	link->link_autoneg = ETH_LINK_FIXED;
-	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = RTE_ETH_LINK_UP;
+	link->link_speed = RTE_ETH_SPEED_NUM_100G;
+	link->link_autoneg = RTE_ETH_LINK_FIXED;
+	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	return 0;
 }
 
@@ -131,7 +131,7 @@ cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
 
 	link->link_status = rsp->link_info.link_up;
 	link->link_speed = rsp->link_info.speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (rsp->link_info.full_duplex)
 		link->link_duplex = rsp->link_info.full_duplex;
@@ -233,22 +233,22 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 
 	/* 50G and 100G to be supported for board version C0 and above */
 	if (!otx2_dev_is_Ax(dev)) {
-		if (link_speeds & ETH_LINK_SPEED_100G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_100G)
 			link_speed = 100000;
-		if (link_speeds & ETH_LINK_SPEED_50G)
+		if (link_speeds & RTE_ETH_LINK_SPEED_50G)
 			link_speed = 50000;
 	}
-	if (link_speeds & ETH_LINK_SPEED_40G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
 		link_speed = 40000;
-	if (link_speeds & ETH_LINK_SPEED_25G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
 		link_speed = 25000;
-	if (link_speeds & ETH_LINK_SPEED_20G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
 		link_speed = 20000;
-	if (link_speeds & ETH_LINK_SPEED_10G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
 		link_speed = 10000;
-	if (link_speeds & ETH_LINK_SPEED_5G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_5G)
 		link_speed = 5000;
-	if (link_speeds & ETH_LINK_SPEED_1G)
+	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
 		link_speed = 1000;
 
 	return link_speed;
@@ -257,11 +257,11 @@ nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
 static inline uint8_t
 nix_parse_eth_link_duplex(uint32_t link_speeds)
 {
-	if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
-			(link_speeds & ETH_LINK_SPEED_100M_HD))
-		return ETH_LINK_HALF_DUPLEX;
+	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+			(link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+		return RTE_ETH_LINK_HALF_DUPLEX;
 	else
-		return ETH_LINK_FULL_DUPLEX;
+		return RTE_ETH_LINK_FULL_DUPLEX;
 }
 
 int
@@ -279,7 +279,7 @@ otx2_apply_link_speed(struct rte_eth_dev *eth_dev)
 	cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
 	if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
 		cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
-		cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+		cfg.an = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		return cgx_change_mode(dev, &cfg);
 	}
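[Note: the parser above relies on link_speeds being a bitmask where
RTE_ETH_LINK_SPEED_FIXED (bit 0) disables autonegotiation and the remaining
bits advertise speeds. A minimal sketch with an illustrative helper name:]

	static uint32_t
	highest_speed_mbps(uint32_t link_speeds)
	{
		if (link_speeds & RTE_ETH_LINK_SPEED_100G)
			return RTE_ETH_SPEED_NUM_100G;	/* 100000 Mbps */
		if (link_speeds & RTE_ETH_LINK_SPEED_40G)
			return RTE_ETH_SPEED_NUM_40G;	/* 40000 Mbps */
		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
			return RTE_ETH_SPEED_NUM_10G;	/* 10000 Mbps */
		return RTE_ETH_SPEED_NUM_NONE;		/* autoneg/unknown */
	}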
diff --git a/drivers/net/octeontx2/otx2_mcast.c b/drivers/net/octeontx2/otx2_mcast.c
index f84aa1bf570c..b9c63ad3bc21 100644
--- a/drivers/net/octeontx2/otx2_mcast.c
+++ b/drivers/net/octeontx2/otx2_mcast.c
@@ -100,7 +100,7 @@ nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
 
 		action = NIX_RX_ACTIONOP_UCAST;
 
-		if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+		if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 			action = NIX_RX_ACTIONOP_RSS;
 			action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 		}
diff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c
index 91e5c0f6bd11..abb213058792 100644
--- a/drivers/net/octeontx2/otx2_ptp.c
+++ b/drivers/net/octeontx2/otx2_ptp.c
@@ -250,7 +250,7 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
 	/* System time should be already on by default */
 	nix_start_timecounters(eth_dev);
 
-	dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
@@ -287,7 +287,7 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
 	if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
 		return -EINVAL;
 
-	dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+	dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
 	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
 
diff --git a/drivers/net/octeontx2/otx2_rss.c b/drivers/net/octeontx2/otx2_rss.c
index 7dbe5f69ae65..68cef1caa394 100644
--- a/drivers/net/octeontx2/otx2_rss.c
+++ b/drivers/net/octeontx2/otx2_rss.c
@@ -85,8 +85,8 @@ otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
 			if ((reta_conf[i].mask >> j) & 0x01)
 				rss->ind_tbl[idx] = reta_conf[i].reta[j];
 			idx++;
@@ -118,8 +118,8 @@ otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Copy RETA table */
-	for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = rss->ind_tbl[j];
 	}
@@ -178,23 +178,23 @@ rss_get_key(struct otx2_eth_dev *dev, uint8_t *key)
 }
 
 #define RSS_IPV4_ENABLE ( \
-			  ETH_RSS_IPV4 | \
-			  ETH_RSS_FRAG_IPV4 | \
-			  ETH_RSS_NONFRAG_IPV4_UDP | \
-			  ETH_RSS_NONFRAG_IPV4_TCP | \
-			  ETH_RSS_NONFRAG_IPV4_SCTP)
+			  RTE_ETH_RSS_IPV4 | \
+			  RTE_ETH_RSS_FRAG_IPV4 | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
 
 #define RSS_IPV6_ENABLE ( \
-			  ETH_RSS_IPV6 | \
-			  ETH_RSS_FRAG_IPV6 | \
-			  ETH_RSS_NONFRAG_IPV6_UDP | \
-			  ETH_RSS_NONFRAG_IPV6_TCP | \
-			  ETH_RSS_NONFRAG_IPV6_SCTP)
+			  RTE_ETH_RSS_IPV6 | \
+			  RTE_ETH_RSS_FRAG_IPV6 | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
 
 #define RSS_IPV6_EX_ENABLE ( \
-			     ETH_RSS_IPV6_EX | \
-			     ETH_RSS_IPV6_TCP_EX | \
-			     ETH_RSS_IPV6_UDP_EX)
+			     RTE_ETH_RSS_IPV6_EX | \
+			     RTE_ETH_RSS_IPV6_TCP_EX | \
+			     RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define RSS_MAX_LEVELS   3
 
@@ -233,24 +233,24 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 
 	dev->rss_info.nix_rss = ethdev_rss;
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
 	    dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
 		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
 	}
 
-	if (ethdev_rss & ETH_RSS_C_VLAN)
+	if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+	if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
 		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
 	if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -259,34 +259,34 @@ otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
 	if (ethdev_rss & RSS_IPV6_ENABLE)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-	if (ethdev_rss & ETH_RSS_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_TCP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_UDP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_SCTP)
+	if (ethdev_rss & RTE_ETH_RSS_SCTP)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+	if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
 		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
 	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
 		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-	if (ethdev_rss & ETH_RSS_NVGRE)
+	if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-	if (ethdev_rss & ETH_RSS_VXLAN)
+	if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-	if (ethdev_rss & ETH_RSS_GENEVE)
+	if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-	if (ethdev_rss & ETH_RSS_GTPU)
+	if (ethdev_rss & RTE_ETH_RSS_GTPU)
 		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
 	return flowkey_cfg;
@@ -343,7 +343,7 @@ otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 		otx2_nix_rss_set_key(dev, rss_conf->rss_key,
 				     (uint32_t)rss_conf->rss_key_len);
 
-	rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg =
@@ -390,7 +390,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	int rc;
 
 	/* Skip further configuration if selected mode is not RSS */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS || !qcnt)
 		return 0;
 
 	/* Update default RSS key and cfg */
@@ -408,7 +408,7 @@ otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
 	}
 
 	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
 	if (rss_hash_level)
 		rss_hash_level -= 1;
 	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
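[Note: RTE_ETH_RSS_LEVEL() extracts the two level bits from rss_hf, which is
what the "- 1" adjustment above compensates for. A short sketch using the
values from rte_ethdev.h:]

	uint64_t rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_LEVEL_INNERMOST;
	uint32_t level = RTE_ETH_RSS_LEVEL(rss_hf);
	/* level == 2 here: 0 = PMD default, 1 = outermost, 2 = innermost;
	 * the driver maps it to a zero-based table index with "- 1". */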
diff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c
index 0d85c898bfe7..2c18483b98fd 100644
--- a/drivers/net/octeontx2/otx2_rx.c
+++ b/drivers/net/octeontx2/otx2_rx.c
@@ -414,12 +414,12 @@ NIX_RX_FASTPATH_MODES
 	/* For PTP enabled, scalar rx function should be chosen as most of the
 	 * PTP apps are implemented to rx burst 1 pkt.
 	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		pick_rx_func(eth_dev, nix_eth_rx_burst);
 	else
 		pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
-	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 
 	/* Copy multi seg version with no offload for tear down sequence */
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index ad704d745b04..135615580bbf 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -1070,7 +1070,7 @@ NIX_TX_FASTPATH_MODES
 	else
 		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
 
-	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 
 	rte_mb();
diff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c
index f5161e17a16d..cce643b7b51d 100644
--- a/drivers/net/octeontx2/otx2_vlan.c
+++ b/drivers/net/octeontx2/otx2_vlan.c
@@ -50,7 +50,7 @@ nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
 
 	action = NIX_RX_ACTIONOP_UCAST;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		action = NIX_RX_ACTIONOP_RSS;
 		action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
 	}
@@ -99,7 +99,7 @@ nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
 	 * Take offset from LA since in case of untagged packet,
 	 * lbptr is zero.
 	 */
-	if (type == ETH_VLAN_TYPE_OUTER) {
+	if (type == RTE_ETH_VLAN_TYPE_OUTER) {
 		vtag_action.act.vtag0_def = vtag_index;
 		vtag_action.act.vtag0_lid = NPC_LID_LA;
 		vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
@@ -413,7 +413,7 @@ nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
 		if (vlan->strip_on ||
 		    (vlan->qinq_on && !vlan->qinq_before_def)) {
 			if (eth_dev->data->dev_conf.rxmode.mq_mode ==
-								ETH_MQ_RX_RSS)
+								RTE_ETH_MQ_RX_RSS)
 				vlan->def_rx_mcam_ent.action |=
 							NIX_RX_ACTIONOP_RSS;
 			else
@@ -717,48 +717,48 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
 	rxmode = &eth_dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, true);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			rc = nix_vlan_hw_strip(eth_dev, false);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-			offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, true, 0);
 		} else {
-			offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			rc = nix_vlan_hw_filter(eth_dev, false, 0);
 		}
 		if (rc)
 			goto done;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
 		if (!dev->vlan_info.qinq_on) {
-			offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, true);
 			if (rc)
 				goto done;
 		}
 	} else {
 		if (dev->vlan_info.qinq_on) {
-			offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+			offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 			rc = otx2_nix_config_double_vlan(eth_dev, false);
 			if (rc)
 				goto done;
 		}
 	}
 
-	if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-			DEV_RX_OFFLOAD_QINQ_STRIP)) {
+	if (offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) {
 		dev->rx_offloads |= offloads;
 		dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 		otx2_eth_set_rx_function(eth_dev);
@@ -780,7 +780,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
 
 	tpid_cfg->tpid = tpid;
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
 	else
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
@@ -789,7 +789,7 @@ otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 	if (rc)
 		return rc;
 
-	if (type == ETH_VLAN_TYPE_OUTER)
+	if (type == RTE_ETH_VLAN_TYPE_OUTER)
 		dev->vlan_info.outer_vlan_tpid = tpid;
 	else
 		dev->vlan_info.inner_vlan_tpid = tpid;
@@ -864,7 +864,7 @@ otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 		vlan->outer_vlan_idx = 0;
 	}
 
-	rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+	rc = nix_vlan_handle_default_tx_entry(dev, RTE_ETH_VLAN_TYPE_OUTER,
 					      vtag_index, on);
 	if (rc < 0) {
 		printf("Default tx entry failed with rc %d\n", rc);
@@ -986,12 +986,12 @@ otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
 	} else {
 		/* Reinstall all mcam entries now if filter offload is set */
 		if (eth_dev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_VLAN_FILTER)
+		    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			nix_vlan_reinstall_vlan_filters(eth_dev);
 	}
 
 	mask =
-	    ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	    RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
 	rc = otx2_nix_vlan_offload_set(eth_dev, mask);
 	if (rc) {
 		otx2_err("Failed to set vlan offload rc=%d", rc);
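[Note: for completeness, the application-facing call that ends up in this
handler takes the same renamed mask bits; port_id and the error handling
are illustrative:]

	uint16_t port_id = 0;	/* illustrative */
	int mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	int ret;

	/* The PMD handler reads the matching RTE_ETH_RX_OFFLOAD_* flags
	 * from dev_conf.rxmode.offloads, as above. */
	ret = rte_eth_dev_set_vlan_offload(port_id, mask);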
diff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c
index 698d22e22685..74dc36a17648 100644
--- a/drivers/net/octeontx_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeontx_ep/otx_ep_ethdev.c
@@ -33,14 +33,14 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	otx_epvf = OTX_EP_DEV(eth_dev);
 
-	devinfo->speed_capa = ETH_LINK_SPEED_10G;
+	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
 
 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
 	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
-	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
 
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.c b/drivers/net/octeontx_ep/otx_ep_rxtx.c
index aa4dcd33cc79..9338b30672ec 100644
--- a/drivers/net/octeontx_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeontx_ep/otx_ep_rxtx.c
@@ -563,7 +563,7 @@ otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
@@ -697,7 +697,7 @@ otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			struct otx_ep_buf_free_info *finfo;
 			int j, frags, num_sg;
 
-			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+			if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 				goto xmit_fail;
 
 			finfo = (struct otx_ep_buf_free_info *)
@@ -954,7 +954,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
 	droq_pkt->l4_len = hdr_lens.l4_len;
 
 	if (droq_pkt->nb_segs > 1 &&
-	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
 		rte_pktmbuf_free(droq_pkt);
 		goto oq_read_fail;
 	}
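[Note: the check above drops multi-segment packets unless scatter was
requested at configure time. A minimal sketch of requesting both renamed
offloads; port_id and the 1/1 queue counts are illustrative:]

	struct rte_eth_conf conf = { 0 };
	uint16_t port_id = 0;	/* illustrative */
	int ret;

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;	/* Rx: chained mbufs */
	conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;	/* Tx: multi-seg mbufs */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);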
diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index d695c5eef7b0..ec29fd6bc53c 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -136,10 +136,10 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-		.link_speed = ETH_SPEED_NUM_10G,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_FIXED,
+		.link_speed = RTE_ETH_SPEED_NUM_10G,
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN,
+		.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
@@ -659,7 +659,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -714,7 +714,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	return 0;
 }
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 4cc002ee8fab..047010e15ed0 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -22,15 +22,15 @@ struct pfe_vdev_init_params {
 static struct pfe *g_pfe;
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -601,9 +601,9 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	}
 
 	link.link_status = lstatus;
-	link.link_speed = ETH_LINK_SPEED_1G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_speed = RTE_ETH_LINK_SPEED_1G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	pfe_eth_atomic_write_link_status(dev, &link);
 
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 6667c2d7ab6d..511742c6a1b3 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -65,8 +65,8 @@ typedef u32 offsize_t;      /* In DWORDS !!! */
 struct eth_phy_cfg {
 /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
 	u32 speed;
-#define ETH_SPEED_AUTONEG   0
-#define ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
+#define RTE_ETH_SPEED_AUTONEG   0
+#define RTE_ETH_SPEED_SMARTLINQ  0x8 /* deprecated - use link_modes field instead */
 
 	u32 pause;      /* bitmask */
 #define ETH_PAUSE_NONE		0x0
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 27f6932dc74e..c907d7fd8312 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -342,9 +342,9 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
 	}
 
 	use_tx_offload = !!(tx_offloads &
-			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
-			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
-			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+			    (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
 
 	if (use_tx_offload) {
 		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
@@ -1002,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			(void)qede_vlan_stripping(eth_dev, 1);
 		else
 			(void)qede_vlan_stripping(eth_dev, 0);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
 		/* VLAN filtering kicks in when a VLAN is added */
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
 			qede_vlan_filter_set(eth_dev, 0, 1);
 		} else {
 			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1022,7 +1022,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 				 * enabled
 				 */
 				eth_dev->data->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_VLAN_FILTER;
+						RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 			} else {
 				qede_vlan_filter_set(eth_dev, 0, 0);
 			}
@@ -1069,11 +1069,11 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	/* Configure default RETA */
 	memset(reta_conf, 0, sizeof(reta_conf));
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-		id = i / RTE_RETA_GROUP_SIZE;
-		pos = i % RTE_RETA_GROUP_SIZE;
+		id = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		q = i % QEDE_RSS_COUNT(eth_dev);
 		reta_conf[id].reta[pos] = q;
 	}
@@ -1112,12 +1112,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Configure TPA parameters */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		if (qede_enable_tpa(eth_dev, true))
 			return -EINVAL;
 		/* Enable scatter mode for LRO */
 		if (!eth_dev->data->scattered_rx)
-			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	}
 
 	/* Start queues */
@@ -1132,7 +1132,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	 * Also, we would like to retain similar behavior in PF case, so we
 	 * don't do PF/VF specific check here.
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1272,8 +1272,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
@@ -1291,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_NOTICE(edev, false,
 			  "Invalid devargs supplied, requested change will not take effect\n");
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+	if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+	      rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
 		DP_ERR(edev, "Unsupported multi-queue mode\n");
 		return -ENOTSUP;
 	}
@@ -1312,7 +1312,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 			return -ENOMEM;
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 
 	if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1321,8 +1321,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = eth_dev->data->mtu;
 
 	/* Enable VLAN offloads by default */
-	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-					     ETH_VLAN_FILTER_MASK);
+	ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK  |
+					     RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1385,34 +1385,34 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_RX_OFFLOAD_UDP_CKSUM	|
-				     DEV_RX_OFFLOAD_TCP_CKSUM	|
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_LRO	|
-				     DEV_RX_OFFLOAD_KEEP_CRC    |
-				     DEV_RX_OFFLOAD_SCATTER	|
-				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP  |
-				     DEV_RX_OFFLOAD_RSS_HASH);
+	dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO	|
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+				     RTE_ETH_RX_OFFLOAD_SCATTER	|
+				     RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
 	 * to both at port and queue levels.
 	 */
-	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
-				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
-				     DEV_TX_OFFLOAD_UDP_CKSUM	|
-				     DEV_TX_OFFLOAD_TCP_CKSUM	|
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_MULTI_SEGS  |
-				     DEV_TX_OFFLOAD_TCP_TSO	|
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT	|
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM	|
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO	|
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	};
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1424,17 +1424,17 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-		speed_cap |= ETH_LINK_SPEED_1G;
+		speed_cap |= RTE_ETH_LINK_SPEED_1G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-		speed_cap |= ETH_LINK_SPEED_10G;
+		speed_cap |= RTE_ETH_LINK_SPEED_10G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-		speed_cap |= ETH_LINK_SPEED_25G;
+		speed_cap |= RTE_ETH_LINK_SPEED_25G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-		speed_cap |= ETH_LINK_SPEED_40G;
+		speed_cap |= RTE_ETH_LINK_SPEED_40G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-		speed_cap |= ETH_LINK_SPEED_50G;
+		speed_cap |= RTE_ETH_LINK_SPEED_50G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-		speed_cap |= ETH_LINK_SPEED_100G;
+		speed_cap |= RTE_ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
 
 	return 0;
@@ -1461,10 +1461,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	/* Link Mode */
 	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
-		link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case QEDE_DUPLEX_FULL:
-		link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case QEDE_DUPLEX_UNKNOWN:
 	default:
@@ -1473,11 +1473,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	/* AN */
 	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+			     RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
 		link.link_speed, link.link_duplex,
@@ -2012,12 +2012,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
-	if (fc_conf->mode == RTE_FC_FULL)
+	if (fc_conf->mode == RTE_ETH_FC_FULL)
 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
 					QED_LINK_PAUSE_RX_ENABLE);
-	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
 	params.link_up = true;
@@ -2041,13 +2041,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
 					 QED_LINK_PAUSE_TX_ENABLE))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2088,14 +2088,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
 	*rss_caps = 0;
-	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -2221,7 +2221,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	uint8_t entry;
 	int rc = 0;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
 		       reta_size);
 		return -EINVAL;
@@ -2245,8 +2245,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 
 	for_each_hwfn(edev, i) {
 		for (j = 0; j < reta_size; j++) {
-			idx = j / RTE_RETA_GROUP_SIZE;
-			shift = j % RTE_RETA_GROUP_SIZE;
+			idx = j / RTE_ETH_RETA_GROUP_SIZE;
+			shift = j % RTE_ETH_RETA_GROUP_SIZE;
 			if (reta_conf[idx].mask & (1ULL << shift)) {
 				entry = reta_conf[idx].reta[shift];
 				fid = entry * edev->num_hwfns + i;
@@ -2282,15 +2282,15 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	uint16_t i, idx, shift;
 	uint8_t entry;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported\n",
 		       reta_size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift)) {
 			entry = qdev->rss_ind_table[i];
 			reta_conf[idx].reta[shift] = entry;
@@ -2718,16 +2718,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	adapter->ipgre.num_filters = 0;
 	if (is_vf) {
 		adapter->vxlan.enable = true;
-		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
 		adapter->geneve.enable = true;
-		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					      ETH_TUNNEL_FILTER_IVLAN;
+		adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					      RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
 		adapter->ipgre.enable = true;
-		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 	} else {
 		adapter->vxlan.enable = false;
 		adapter->geneve.enable = false;
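[Note: the idx/shift arithmetic in the RETA hunks above follows from the
64-entry group layout of struct rte_eth_rss_reta_entry64; a sketch with an
illustrative helper name:]

	/* The indirection table is carried in 64-entry groups, each with
	 * a 64-bit validity mask. */
	static void
	reta_set(struct rte_eth_rss_reta_entry64 *reta_conf,
		 uint16_t i, uint16_t queue)
	{
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;	/* which group */
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;	/* bit in mask */

		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = queue;
	}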
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index c756594bfc4b..440440423a32 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -20,97 +20,97 @@ const struct _qede_udp_tunn_types {
 	const char *string;
 } qede_tunn_types[] = {
 	{
-		ETH_TUNNEL_FILTER_OMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC,
 		ECORE_FILTER_MAC,
 		ECORE_TUNN_CLSS_MAC_VLAN,
 		"outer-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_VNI,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_VLAN,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID,
 		ECORE_FILTER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_MAC_VNI,
 		"outer-mac and vni"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-mac"
 	},
 	{
-		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-mac and inner-vlan"
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IMAC,
 		ECORE_FILTER_INNER_MAC_VNI_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VNI,
 		"vni and inner-mac",
 	},
 	{
-		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"vni and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
 		ECORE_FILTER_INNER_PAIR,
 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
 		"inner-mac and inner-vlan",
 	},
 	{
-		ETH_TUNNEL_FILTER_OIP,
+		RTE_ETH_TUNNEL_FILTER_OIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"outer-IP"
 	},
 	{
-		ETH_TUNNEL_FILTER_IIP,
+		RTE_ETH_TUNNEL_FILTER_IIP,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"inner-IP"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_IVLAN_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_IMAC_TENID,
+		RTE_ETH_TUNNEL_FILTER_IMAC_TENID,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"IMAC_TENID"
 	},
 	{
-		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+		RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC,
 		ECORE_FILTER_UNUSED,
 		MAX_ECORE_TUNN_CLSS,
 		"OMAC_TENID_IMAC"
@@ -144,7 +144,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+	struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
 
 	/* check FDIR modes */
 	switch (fdir->mode) {
@@ -542,7 +542,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -570,7 +570,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 					ECORE_TUNN_CLSS_MAC_VLAN, false);
 
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
 			DP_ERR(edev, "UDP port %u doesn't exist\n",
 				tunnel_udp->udp_port);
@@ -622,7 +622,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 	memset(&tunn, 0, sizeof(tunn));
 
 	switch (tunnel_udp->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for VXLAN was already configured\n",
@@ -659,7 +659,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
 		qdev->vxlan.udp_port = udp_port;
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
 			DP_INFO(edev,
 				"UDP port %u for GENEVE was already configured\n",
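[Note: a sketch of the ethdev call that lands in this switch; the UDP port
is the IANA VXLAN default and port_id is illustrative:]

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,			/* IANA VXLAN */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};
	uint16_t port_id = 0;	/* illustrative */
	int ret;

	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);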
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index c2263787b4ec..d585db8b61e8 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -249,7 +249,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 	/* cache align the mbuf size to simplfy rx_buf_size calculation */
 	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)	||
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)	||
 	    (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
 		if (!dev->data->scattered_rx) {
 			DP_INFO(edev, "Forcing scatter-gather mode\n");
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index c9334448c887..15112b83f4f7 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -73,14 +73,14 @@
 #define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
 #define QEDE_ETH_MAX_LEN	(RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
 
-#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
-				 ETH_RSS_NONFRAG_IPV4_TCP	|\
-				 ETH_RSS_NONFRAG_IPV4_UDP	|\
-				 ETH_RSS_IPV6			|\
-				 ETH_RSS_NONFRAG_IPV6_TCP	|\
-				 ETH_RSS_NONFRAG_IPV6_UDP	|\
-				 ETH_RSS_VXLAN			|\
-				 ETH_RSS_GENEVE)
+#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4			|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV4_UDP	|\
+				 RTE_ETH_RSS_IPV6			|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	|\
+				 RTE_ETH_RSS_NONFRAG_IPV6_UDP	|\
+				 RTE_ETH_RSS_VXLAN			|\
+				 RTE_ETH_RSS_GENEVE)
 
 #define QEDE_RXTX_MAX(qdev) \
 	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
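[Note: capability masks like QEDE_RSS_OFFLOAD_ALL above are what
applications should intersect their requested hash types with; a minimal
sketch, with port_id and conf being illustrative:]

	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = { 0 };
	uint16_t port_id = 0;	/* illustrative */
	uint64_t wanted = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP;

	if (rte_eth_dev_info_get(port_id, &info) == 0)
		conf.rx_adv_conf.rss_conf.rss_hf =
			wanted & info.flow_type_rss_offloads;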
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 0440019e07e1..db10f035dfcb 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -56,10 +56,10 @@ struct pmd_internals {
 };
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
@@ -102,7 +102,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -110,21 +110,21 @@ static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	dev->data->dev_started = 0;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_down(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return 0;
 }
 
 static int
 eth_dev_set_link_up(struct rte_eth_dev *dev)
 {
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return 0;
 }
 
@@ -163,8 +163,8 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 431c42f508d0..9c1be10ac93d 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -106,13 +106,13 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 {
 	uint32_t phy_caps = 0;
 
-	if (~speeds & ETH_LINK_SPEED_FIXED) {
+	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		phy_caps |= (1 << EFX_PHY_CAP_AN);
 		/*
 		 * If no speeds are specified in the mask, any supported
 		 * may be negotiated
 		 */
-		if (speeds == ETH_LINK_SPEED_AUTONEG)
+		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
 			phy_caps |=
 				(1 << EFX_PHY_CAP_1000FDX) |
 				(1 << EFX_PHY_CAP_10000FDX) |
@@ -121,17 +121,17 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
 				(1 << EFX_PHY_CAP_50000FDX) |
 				(1 << EFX_PHY_CAP_100000FDX);
 	}
-	if (speeds & ETH_LINK_SPEED_1G)
+	if (speeds & RTE_ETH_LINK_SPEED_1G)
 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
-	if (speeds & ETH_LINK_SPEED_10G)
+	if (speeds & RTE_ETH_LINK_SPEED_10G)
 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
-	if (speeds & ETH_LINK_SPEED_25G)
+	if (speeds & RTE_ETH_LINK_SPEED_25G)
 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
-	if (speeds & ETH_LINK_SPEED_40G)
+	if (speeds & RTE_ETH_LINK_SPEED_40G)
 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
-	if (speeds & ETH_LINK_SPEED_50G)
+	if (speeds & RTE_ETH_LINK_SPEED_50G)
 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
-	if (speeds & ETH_LINK_SPEED_100G)
+	if (speeds & RTE_ETH_LINK_SPEED_100G)
 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
 
 	return phy_caps;
@@ -401,10 +401,10 @@ sfc_set_fw_subvariant(struct sfc_adapter *sa)
 			tx_offloads |= txq_info->offloads;
 	}
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM |
-			   DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
 	else
 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
@@ -899,7 +899,7 @@ sfc_attach(struct sfc_adapter *sa)
 	sa->priv.shared->tunnel_encaps =
 		encp->enc_tunnel_encapsulations_supported;
 
-	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
 			  encp->enc_tso_v3_enabled;
 		if (!sa->tso)
@@ -908,8 +908,8 @@ sfc_attach(struct sfc_adapter *sa)
 
 	if (sa->tso &&
 	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
-	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
+	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
 		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
 				encp->enc_tso_v3_enabled;
 		if (!sa->tso_encap)
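[Note: RTE_ETH_RX_OFFLOAD_CHECKSUM, used in the capability lists below, is
itself shorthand for the three L3/L4 checksum bits; a one-line sketch:]

	uint64_t csum = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
	/* csum == RTE_ETH_RX_OFFLOAD_CHECKSUM */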
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index d958fd642fb1..eeb73a7530ef 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -979,11 +979,11 @@ struct sfc_dp_rx sfc_ef100_rx = {
 				  SFC_DP_RX_FEAT_INTR |
 				  SFC_DP_RX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_RX_OFFLOAD_SCATTER |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_SCATTER |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.get_dev_info		= sfc_ef100_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
 	.qcreate		= sfc_ef100_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index e166fda888b1..67980a587fe4 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -971,16 +971,16 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS |
 				  SFC_DP_TX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_MULTI_SEGS |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 991329e86f01..9ea207cca163 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -746,8 +746,8 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
 				  SFC_DP_RX_FEAT_FLOW_MARK,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
 	.queue_offload_capa	= 0,
 	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
 	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 49a7d4fb42fd..9aaabd30eee6 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -819,10 +819,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
 	},
 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
 				  SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
 	.qcreate		= sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ed43adb4ca5c..e7da4608bcb0 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -958,9 +958,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+	if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1125,14 +1125,14 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
@@ -1152,11 +1152,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index f5986b610fff..833d833a0408 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -105,19 +105,19 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = sa->sriov.num_vfs;
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
-		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
 	dev_info->max_rx_queues = sa->rxq_max;
 	dev_info->max_tx_queues = sa->txq_max;
@@ -145,8 +145,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
 				    dev_info->tx_queue_offload_capa;
 
-	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->default_txconf.offloads |= txq_offloads_def;
 
@@ -989,16 +989,16 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
 	switch (link_fc) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case EFX_FCNTL_RESPOND:
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 		break;
 	case EFX_FCNTL_GENERATE:
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 		break;
 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 		break;
 	default:
 		sfc_err(sa, "%s: unexpected flow control value %#x",
@@ -1029,16 +1029,16 @@ sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	switch (fc_conf->mode) {
-	case RTE_FC_NONE:
+	case RTE_ETH_FC_NONE:
 		fcntl = 0;
 		break;
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		fcntl = EFX_FCNTL_RESPOND;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		fcntl = EFX_FCNTL_GENERATE;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
 		break;
 	default:
@@ -1313,7 +1313,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
-		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 		qinfo->scattered_rx = 1;
 	}
 	qinfo->nb_desc = rxq_info->entries;
@@ -1523,9 +1523,9 @@ static efx_tunnel_protocol_t
 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
 {
 	switch (rte_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		return EFX_TUNNEL_PROTOCOL_VXLAN;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		return EFX_TUNNEL_PROTOCOL_GENEVE;
 	default:
 		return EFX_TUNNEL_NPROTOS;
@@ -1652,7 +1652,7 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	/*
 	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
-	 * hence, conversion is done here to derive a correct set of ETH_RSS
+	 * hence, conversion is done here to derive a correct set of RTE_ETH_RSS
 	 * flags which correspond to the active EFX configuration stored
 	 * locally in 'sfc_adapter' and kept up-to-date
 	 */
@@ -1778,8 +1778,8 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp = entry / RTE_RETA_GROUP_SIZE;
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 
 		if ((reta_conf[grp].mask >> grp_idx) & 1)
 			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
@@ -1828,10 +1828,10 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
 	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
 
 	for (entry = 0; entry < reta_size; entry++) {
-		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 		struct rte_eth_rss_reta_entry64 *grp;
 
-		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+		grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
 
 		if (grp->mask & (1ull << grp_idx)) {
 			if (grp->reta[grp_idx] >= rss->channels) {
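
The RTE_ETH_FC_* values substitute one-for-one for the old RTE_FC_* names,
so application-side flow control queries keep the same shape. A hedged
usage sketch (port_id is assumed to refer to a configured port):

#include <stdio.h>
#include <string.h>

#include <rte_ethdev.h>

static void
print_fc_mode(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
		return;

	if (fc_conf.mode == RTE_ETH_FC_FULL)
		printf("port %u: Rx and Tx pause enabled\n", port_id);
}
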
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 8096af56739f..be2dfe778a0d 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -392,7 +392,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vlan *spec = NULL;
 	const struct rte_flow_item_vlan *mask = NULL;
 	const struct rte_flow_item_vlan supp_mask = {
-		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
 		.inner_type = RTE_BE16(0xffff),
 	};
 
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index 5320d8903dac..27b02b1119fb 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -573,66 +573,66 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
 
 	memset(link_info, 0, sizeof(*link_info));
 	if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
-		link_info->link_status = ETH_LINK_DOWN;
+		link_info->link_status = RTE_ETH_LINK_DOWN;
 	else
-		link_info->link_status = ETH_LINK_UP;
+		link_info->link_status = RTE_ETH_LINK_UP;
 
 	switch (link_mode) {
 	case EFX_LINK_10HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_10FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_100FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100M;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100M;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_1000HDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case EFX_LINK_1000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_1G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_1G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_10000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_10G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_10G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_25000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_25G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_25G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_40000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_40G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_40G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_50000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_50G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_50G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case EFX_LINK_100000FDX:
-		link_info->link_speed  = ETH_SPEED_NUM_100G;
-		link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_100G;
+		link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	default:
 		SFC_ASSERT(B_FALSE);
 		/* FALLTHROUGH */
 	case EFX_LINK_UNKNOWN:
 	case EFX_LINK_DOWN:
-		link_info->link_speed  = ETH_SPEED_NUM_NONE;
+		link_info->link_speed  = RTE_ETH_SPEED_NUM_NONE;
 		link_info->link_duplex = 0;
 		break;
 	}
 
-	link_info->link_autoneg = ETH_LINK_AUTONEG;
+	link_info->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 int
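
The large switch above only renames constants; the rte_eth_link fields keep
their meaning. A minimal sketch of reporting a fixed 10G full-duplex link
under the new names (the speed value is illustrative;
rte_eth_linkstatus_set() is the same driver-side helper used elsewhere in
this patch):

/* ethdev_driver.h in current DPDK; rte_ethdev_driver.h in older
 * releases. */
#include <ethdev_driver.h>

static int
example_link_update(struct rte_eth_dev *dev)
{
	struct rte_eth_link link = {
		.link_speed   = RTE_ETH_SPEED_NUM_10G,
		.link_duplex  = RTE_ETH_LINK_FULL_DUPLEX,
		.link_autoneg = RTE_ETH_LINK_AUTONEG,
		.link_status  = RTE_ETH_LINK_UP,
	};

	return rte_eth_linkstatus_set(dev, &link);
}
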
diff --git a/drivers/net/sfc/sfc_repr.c b/drivers/net/sfc/sfc_repr.c
index 2500b14cb006..9d88d554c1ba 100644
--- a/drivers/net/sfc/sfc_repr.c
+++ b/drivers/net/sfc/sfc_repr.c
@@ -405,7 +405,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 	}
 
 	switch (conf->rxmode.mq_mode) {
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (nb_rx_queues != 1) {
 			sfcr_err(sr, "Rx RSS is not supported with %u queues",
 				 nb_rx_queues);
@@ -420,7 +420,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 			ret = -EINVAL;
 		}
 		break;
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		break;
 	default:
 		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
@@ -428,7 +428,7 @@ sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
 		break;
 	}
 
-	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
+	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
 		sfcr_err(sr, "Tx mode MQ modes not supported");
 		ret = -EINVAL;
 	}
@@ -553,8 +553,8 @@ sfc_repr_dev_link_update(struct rte_eth_dev *dev,
 		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
 	} else {
 		memset(&link, 0, sizeof(link));
-		link.link_status = ETH_LINK_UP;
-		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		link.link_status = RTE_ETH_LINK_UP;
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c60ef17a922a..23df27c8f45a 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -648,9 +648,9 @@ struct sfc_dp_rx sfc_efx_rx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
 	},
 	.features		= SFC_DP_RX_FEAT_INTR,
-	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
-				  DEV_RX_OFFLOAD_RSS_HASH,
-	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
+	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
+				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
+	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
 	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
 	.qcreate		= sfc_efx_rx_qcreate,
 	.qdestroy		= sfc_efx_rx_qdestroy,
@@ -931,7 +931,7 @@ sfc_rx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (encp->enc_tunnel_encapsulations_supported == 0)
-		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	return ~no_caps;
 }
@@ -1140,7 +1140,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 
 	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
 				  encp->enc_rx_prefix_size,
-				  (offloads & DEV_RX_OFFLOAD_SCATTER),
+				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
 				  encp->enc_rx_scatter_max,
 				  &error)) {
 		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
@@ -1166,15 +1166,15 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags |=
-		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
 	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
-	     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
 
-	if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
 		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
 
 	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
@@ -1211,7 +1211,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	rxq_info->refill_mb_pool = mb_pool;
 
 	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
-	    (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
 	else
 		rxq_info->rxq_flags = 0;
@@ -1313,19 +1313,19 @@ sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
  * Mapping between RTE RSS hash functions and their EFX counterparts.
  */
 static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
-	{ ETH_RSS_NONFRAG_IPV4_TCP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV4_UDP,
+	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
 	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
-	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
 	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
-	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
 	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV4, 2TUPLE) },
-	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
-	  ETH_RSS_IPV6_EX,
+	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+	  RTE_ETH_RSS_IPV6_EX,
 	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
 	  EFX_RX_HASH(IPV6, 2TUPLE) }
 };
@@ -1645,10 +1645,10 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	int rc = 0;
 
 	switch (rxmode->mq_mode) {
-	case ETH_MQ_RX_NONE:
+	case RTE_ETH_MQ_RX_NONE:
 		/* No special checks are required */
 		break;
-	case ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_RSS:
 		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
 			sfc_err(sa, "RSS is not available");
 			rc = EINVAL;
@@ -1665,16 +1665,16 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 	 * so unsupported offloads cannot be added as a result of
 	 * the check below.
 	 */
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
-	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 	}
 
 	return rc;
@@ -1820,7 +1820,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	}
 
 configure_rss:
-	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
 			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
 	if (rss->channels > 0) {
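
On the application side, requesting RSS after this rename uses
RTE_ETH_MQ_RX_RSS and the RTE_ETH_RSS_* hash-field flags in exactly the
same places as before. A hedged configuration sketch:

#include <rte_ethdev.h>

static const struct rte_eth_conf rss_port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* keep the driver default key */
			.rss_hf = RTE_ETH_RSS_IPV4 |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
		},
	},
};
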
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 13392cdd5a09..0273788c20ce 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -54,23 +54,23 @@ sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 	uint64_t no_caps = 0;
 
 	if (!encp->enc_hw_tx_insert_vlan_enabled)
-		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if (!encp->enc_tunnel_encapsulations_supported)
-		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 	if (!sa->tso)
-		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
 
 	if (!sa->tso_encap ||
 	    (encp->enc_tunnel_encapsulations_supported &
 	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
-		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+		no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 
 	return ~no_caps;
 }
@@ -114,8 +114,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -309,7 +309,7 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 	int rc = 0;
 
 	switch (txmode->mq_mode) {
-	case ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_NONE:
 		break;
 	default:
 		sfc_err(sa, "Tx multi-queue mode %u not supported",
@@ -529,23 +529,23 @@ sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 	if (rc != 0)
 		goto fail_ev_qstart;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_IPV4;
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+	if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+	if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
 		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+	if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -876,9 +876,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/*
 		 * Here VLAN TCI is expected to be zero when no
-		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+		 * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
@@ -1242,13 +1242,13 @@ struct sfc_dp_tx sfc_efx_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
 	},
 	.features		= 0,
-	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
-				  DEV_TX_OFFLOAD_MULTI_SEGS,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_UDP_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_CKSUM |
-				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				  DEV_TX_OFFLOAD_TCP_TSO,
+	.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  RTE_ETH_TX_OFFLOAD_TCP_TSO,
 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
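
The "both or neither" constraint on L4 checksum offloads checked in
sfc_tx_qcheck_conf() above is an equality test on two mask probes. In
isolation (helper name illustrative, not part of this patch):

#include <errno.h>
#include <stdint.h>

#include <rte_ethdev.h>

/* TCP and UDP checksum offloads must be requested together or not
 * at all. */
static int
check_l4_csum_pairing(uint64_t offloads)
{
	int tcp = (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0;
	int udp = (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0;

	return (tcp == udp) ? 0 : -EINVAL;
}
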
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b3b55b9035b1..3ef33818a9e0 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -173,7 +173,7 @@ pmd_dev_start(struct rte_eth_dev *dev)
 		return status;
 
 	/* Link UP */
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	return 0;
 }
@@ -184,7 +184,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
 	struct pmd_internals *p = dev->data->dev_private;
 
 	/* Link DOWN */
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	/* Firmware */
 	softnic_pipeline_disable_all(p);
@@ -386,10 +386,10 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
 
 	/* dev->data */
 	dev->data->dev_private = dev_private;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
-	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	dev->data->mac_addrs = &eth_addr;
 	dev->data->promiscuous = 1;
 	dev->data->numa_node = params->cpu_id;
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 3c6a285e3c5e..6a084e3e1b1b 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1042,7 +1042,7 @@ static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_dev_data *data = dev->data;
-	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+	if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
 		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
 		data->scattered_rx = 1;
 	} else {
@@ -1064,11 +1064,11 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = internals->max_rx_queues;
 	dev_info->max_tx_queues = internals->max_tx_queues;
 	dev_info->min_rx_bufsize = 0;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = 0;
 	dev_info->rx_queue_offload_capa = 0;
 	dev_info->tx_queue_offload_capa = 0;
-	dev_info->speed_capa = ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1202,10 +1202,10 @@ eth_link_update(struct rte_eth_dev *dev,
 
 	memset(&link, 0, sizeof(link));
 
-	link.link_speed = ETH_SPEED_NUM_100G;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_status = ETH_LINK_UP;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_speed = RTE_ETH_SPEED_NUM_100G;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	rte_eth_linkstatus_set(dev, &link);
 	return 0;
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index e4f1ad45219e..5d5350d78e03 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -70,16 +70,16 @@
 
 #define TAP_IOV_DEFAULT_MAX 1024
 
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER |	\
-			DEV_RX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_RX_OFFLOAD_UDP_CKSUM |	\
-			DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER |	\
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS |	\
-			DEV_TX_OFFLOAD_IPV4_CKSUM |	\
-			DEV_TX_OFFLOAD_UDP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_CKSUM |	\
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |	\
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 static int tap_devices_count;
 
@@ -97,10 +97,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;	/* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -433,7 +433,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		len = readv(process_private->rxq_fds[rxq->queue_id],
 			*rxq->iovecs,
-			1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+			1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
 			     rxq->nb_rx_desc : 1));
 		if (len < (int)sizeof(struct tun_pi))
 			break;
@@ -489,7 +489,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		seg->next = NULL;
 		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
 						      RTE_PTYPE_ALL_MASK);
-		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 			tap_verify_csum(mbuf);
 
 		/* account for the receive frame */
@@ -866,7 +866,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -876,7 +876,7 @@ tap_link_set_up(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
@@ -956,30 +956,30 @@ tap_dev_speed_capa(void)
 	uint32_t speed = pmd_link.link_speed;
 	uint32_t capa = 0;
 
-	if (speed >= ETH_SPEED_NUM_10M)
-		capa |= ETH_LINK_SPEED_10M;
-	if (speed >= ETH_SPEED_NUM_100M)
-		capa |= ETH_LINK_SPEED_100M;
-	if (speed >= ETH_SPEED_NUM_1G)
-		capa |= ETH_LINK_SPEED_1G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_2_5G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_5G;
-	if (speed >= ETH_SPEED_NUM_10G)
-		capa |= ETH_LINK_SPEED_10G;
-	if (speed >= ETH_SPEED_NUM_20G)
-		capa |= ETH_LINK_SPEED_20G;
-	if (speed >= ETH_SPEED_NUM_25G)
-		capa |= ETH_LINK_SPEED_25G;
-	if (speed >= ETH_SPEED_NUM_40G)
-		capa |= ETH_LINK_SPEED_40G;
-	if (speed >= ETH_SPEED_NUM_50G)
-		capa |= ETH_LINK_SPEED_50G;
-	if (speed >= ETH_SPEED_NUM_56G)
-		capa |= ETH_LINK_SPEED_56G;
-	if (speed >= ETH_SPEED_NUM_100G)
-		capa |= ETH_LINK_SPEED_100G;
+	if (speed >= RTE_ETH_SPEED_NUM_10M)
+		capa |= RTE_ETH_LINK_SPEED_10M;
+	if (speed >= RTE_ETH_SPEED_NUM_100M)
+		capa |= RTE_ETH_LINK_SPEED_100M;
+	if (speed >= RTE_ETH_SPEED_NUM_1G)
+		capa |= RTE_ETH_LINK_SPEED_1G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_2_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_10G)
+		capa |= RTE_ETH_LINK_SPEED_10G;
+	if (speed >= RTE_ETH_SPEED_NUM_20G)
+		capa |= RTE_ETH_LINK_SPEED_20G;
+	if (speed >= RTE_ETH_SPEED_NUM_25G)
+		capa |= RTE_ETH_LINK_SPEED_25G;
+	if (speed >= RTE_ETH_SPEED_NUM_40G)
+		capa |= RTE_ETH_LINK_SPEED_40G;
+	if (speed >= RTE_ETH_SPEED_NUM_50G)
+		capa |= RTE_ETH_LINK_SPEED_50G;
+	if (speed >= RTE_ETH_SPEED_NUM_56G)
+		capa |= RTE_ETH_LINK_SPEED_56G;
+	if (speed >= RTE_ETH_SPEED_NUM_100G)
+		capa |= RTE_ETH_LINK_SPEED_100G;
 
 	return capa;
 }
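
One pre-existing quirk is visible in tap_dev_speed_capa() just above: the
2.5G rung is gated on RTE_ETH_SPEED_NUM_5G on both sides of the rename. If
that is unintended, the corrected rung would presumably read (an
assumption, not part of this patch):

	if (speed >= RTE_ETH_SPEED_NUM_2_5G)
		capa |= RTE_ETH_LINK_SPEED_2_5G;
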
@@ -1196,15 +1196,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
 		if (!(ifr.ifr_flags & IFF_UP) ||
 		    !(ifr.ifr_flags & IFF_RUNNING)) {
-			dev_link->link_status = ETH_LINK_DOWN;
+			dev_link->link_status = RTE_ETH_LINK_DOWN;
 			return 0;
 		}
 	}
 	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
 	dev_link->link_status =
 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-		 ETH_LINK_UP :
-		 ETH_LINK_DOWN);
+		 RTE_ETH_LINK_UP :
+		 RTE_ETH_LINK_DOWN);
 	return 0;
 }
 
@@ -1391,7 +1391,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 	int ret;
 
 	/* initialize GSO context */
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (!pmd->gso_ctx_mp) {
 		/*
 		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1606,9 +1606,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->csum = !!(offloads &
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM));
+			(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
@@ -1760,7 +1760,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	fc_conf->mode = RTE_FC_NONE;
+	fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1768,7 +1768,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	if (fc_conf->mode != RTE_FC_NONE)
+	if (fc_conf->mode != RTE_ETH_FC_NONE)
 		return -ENOTSUP;
 	return 0;
 }
@@ -2262,7 +2262,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
 			}
 		}
 	}
-	pmd_link.link_speed = ETH_SPEED_NUM_10G;
+	pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
 	TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2436,7 +2436,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 		return 0;
 	}
 
-	speed = ETH_SPEED_NUM_10G;
+	speed = RTE_ETH_SPEED_NUM_10G;
 
 	/* use tap%d which causes kernel to choose next available */
 	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 176e7180bdaa..48c151cf6b68 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -13,7 +13,7 @@
 #define TAP_RSS_HASH_KEY_SIZE 40
 
 /* Supported RSS */
-#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define TAP_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))
 
 /* hashed fields for RSS */
 enum hash_field {
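
Because TAP_RSS_HF_MASK is the complement of the supported hash-field
groups, validating a requested rss_hf is a single AND. A hedged sketch
(helper name illustrative):

#include <errno.h>
#include <stdint.h>

#include <rte_ethdev.h>
#include "tap_rss.h"	/* TAP_RSS_HF_MASK, defined above */

/* Reject hash fields outside the IP/UDP/TCP groups the driver
 * supports. */
static int
tap_validate_rss_hf(uint64_t rss_hf)
{
	if (rss_hf & TAP_RSS_HF_MASK)
		return -ENOTSUP;
	return 0;
}
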
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 8ce9a99dc074..762647e3b6ee 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -61,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	if (nic->duplex == NICVF_HALF_DUPLEX)
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -134,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
 			nicvf_link_status_update(nic, &link);
-			if (link.link_status == ETH_LINK_UP)
+			if (link.link_status == RTE_ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
@@ -390,35 +390,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
 	uint64_t nic_rss = 0;
 
-	if (ethdev_rss & ETH_RSS_IPV4)
+	if (ethdev_rss & RTE_ETH_RSS_IPV4)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_IPV6)
+	if (ethdev_rss & RTE_ETH_RSS_IPV6)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-		if (ethdev_rss & ETH_RSS_VXLAN)
+		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 			nic_rss |= RSS_TUN_VXLAN_ENA;
 
-		if (ethdev_rss & ETH_RSS_GENEVE)
+		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 			nic_rss |= RSS_TUN_GENEVE_ENA;
 
-		if (ethdev_rss & ETH_RSS_NVGRE)
+		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 			nic_rss |= RSS_TUN_NVGRE_ENA;
 	}
 
@@ -431,28 +431,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
 	uint64_t ethdev_rss = 0;
 
 	if (nic_rss & RSS_IP_ENA)
-		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-				ETH_RSS_NONFRAG_IPV6_TCP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-				ETH_RSS_NONFRAG_IPV6_UDP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-		ethdev_rss |= ETH_RSS_PORT;
+		ethdev_rss |= RTE_ETH_RSS_PORT;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
 		if (nic_rss & RSS_TUN_VXLAN_ENA)
-			ethdev_rss |= ETH_RSS_VXLAN;
+			ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
 		if (nic_rss & RSS_TUN_GENEVE_ENA)
-			ethdev_rss |= ETH_RSS_GENEVE;
+			ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
 		if (nic_rss & RSS_TUN_NVGRE_ENA)
-			ethdev_rss |= ETH_RSS_NVGRE;
+			ethdev_rss |= RTE_ETH_RSS_NVGRE;
 	}
 	return ethdev_rss;
 }
@@ -479,8 +479,8 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = tbl[j];
 	}
@@ -509,8 +509,8 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				tbl[j] = reta_conf[i].reta[j];
 	}
@@ -807,9 +807,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
 		    dev->data->nb_rx_queues,
 		    dev->data->dev_conf.lpbk_mode, rsshf);
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		ret = nicvf_rss_term(nic);
-	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
 	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -870,7 +870,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -992,7 +992,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->offloads = offloads;
 
-	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1382,11 +1382,11 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+				 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
@@ -1415,10 +1415,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
-			DEV_TX_OFFLOAD_UDP_CKSUM          |
-			DEV_TX_OFFLOAD_TCP_CKSUM,
+		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM          |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 	};
 
 	return 0;
@@ -1582,8 +1582,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1711,7 +1711,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	/* Setup scatter mode if needed by jumbo */
 	if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU */
@@ -1896,8 +1896,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (!rte_eal_has_hugepages()) {
 		PMD_INIT_LOG(INFO, "Huge page is not configured");
@@ -1909,8 +1909,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
@@ -1920,7 +1920,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -1955,7 +1955,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic->offload_cksum = 1;
 
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -2032,8 +2032,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			nicvf_vlan_hw_strip(nic, true);
 		else
 			nicvf_vlan_hw_strip(nic, false);
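
RTE_ETH_RETA_GROUP_SIZE (64) splits a flat RETA index into a
rte_eth_rss_reta_entry64 group and a position within it; that is the
arithmetic in both reta_query and reta_update above. A standalone sketch
modeled on the sfc variant of the same loop:

#include <stdint.h>

#include <rte_ethdev.h>

static void
reta_copy_out(struct rte_eth_rss_reta_entry64 *reta_conf,
	      const uint8_t *tbl, uint16_t reta_size)
{
	uint16_t entry;

	for (entry = 0; entry < reta_size; entry++) {
		uint16_t grp = entry / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t idx = entry % RTE_ETH_RETA_GROUP_SIZE;

		/* Only entries selected in the group mask are copied. */
		if ((reta_conf[grp].mask >> idx) & 1)
			reta_conf[grp].reta[idx] = tbl[entry];
	}
}
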
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 5d38750d6313..cb474e26b81e 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -16,32 +16,32 @@
 #define NICVF_UNKNOWN_DUPLEX		0xff
 
 #define NICVF_RSS_OFFLOAD_PASS1 ( \
-	ETH_RSS_PORT | \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_PORT | \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define NICVF_RSS_OFFLOAD_TUNNEL ( \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
 
 #define NICVF_TX_OFFLOAD_CAPA ( \
-	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
-	DEV_TX_OFFLOAD_UDP_CKSUM        | \
-	DEV_TX_OFFLOAD_TCP_CKSUM        | \
-	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
-	DEV_TX_OFFLOAD_MULTI_SEGS)
+	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       | \
+	RTE_ETH_TX_OFFLOAD_UDP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_TCP_CKSUM        | \
+	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define NICVF_RX_OFFLOAD_CAPA ( \
-	DEV_RX_OFFLOAD_CHECKSUM    | \
-	DEV_RX_OFFLOAD_VLAN_STRIP  | \
-	DEV_RX_OFFLOAD_SCATTER     | \
-	DEV_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_CHECKSUM    | \
+	RTE_ETH_RX_OFFLOAD_VLAN_STRIP  | \
+	RTE_ETH_RX_OFFLOAD_SCATTER     | \
+	RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
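
Capability sets like the ones above are what a port reports through
rte_eth_dev_info_get(); applications should intersect their requested
hash fields with that report. A hedged sketch:

#include <stdint.h>

#include <rte_ethdev.h>

static uint64_t
supported_rss_hf(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	/* Keep only the hash fields the port advertises. */
	return requested & dev_info.flow_type_rss_offloads;
}
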
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 7b46ffb68635..0b0f9db7cb2a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -998,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 			!(rxcfg & TXGBE_RXCFG_VLAN);
 		rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1033,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
+	case RTE_ETH_VLAN_TYPE_INNER:
 		if (vlan_ext) {
 			wr32m(hw, TXGBE_VLANCTL,
 				TXGBE_VLANCTL_TPID_MASK,
@@ -1053,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 				TXGBE_TAGTPID_LSB(tpid));
 		}
 		break;
-	case ETH_VLAN_TYPE_OUTER:
+	case RTE_ETH_VLAN_TYPE_OUTER:
 		if (vlan_ext) {
 			/* Only the high 16 bits are valid */
 			wr32m(hw, TXGBE_EXTAG,
@@ -1138,10 +1138,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
 	if (on) {
 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	} else {
 		rxq->vlan_flags = PKT_RX_VLAN;
-		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 }
 
@@ -1240,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			txgbe_vlan_strip_queue_set(dev, i, 1);
 		else
 			txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1254,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct txgbe_rx_queue *rxq;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		rxmode = &dev->data->dev_conf.rxmode;
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 		else
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 			}
 	}
 }
@@ -1275,25 +1275,25 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & ETH_VLAN_STRIP_MASK)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		txgbe_vlan_hw_strip_config(dev);
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			txgbe_vlan_hw_filter_enable(dev);
 		else
 			txgbe_vlan_hw_filter_disable(dev);
 	}
 
-	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			txgbe_vlan_hw_extend_enable(dev);
 		else
 			txgbe_vlan_hw_extend_disable(dev);
 	}
 
-	if (mask & ETH_QINQ_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
 			txgbe_qinq_hw_strip_enable(dev);
 		else
 			txgbe_qinq_hw_strip_disable(dev);
@@ -1331,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
@@ -1357,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
 			break;
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
 			PMD_INIT_LOG(ERR, "SRIOV active,"
 					" unsupported mq_mode rx %d.",
 					dev_conf->rxmode.mq_mode);
 			return -EINVAL;
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
 					PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1378,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 					return -EINVAL;
 				}
 			break;
-		case ETH_MQ_RX_VMDQ_ONLY:
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_NONE:
 			/* if no mq mode is configured, use the default scheme */
 			dev->data->dev_conf.rxmode.mq_mode =
-				ETH_MQ_RX_VMDQ_ONLY;
+				RTE_ETH_MQ_RX_VMDQ_ONLY;
 			break;
-		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
 			PMD_INIT_LOG(ERR, "SRIOV is active,"
 					" wrong mq_mode rx %d.",
@@ -1393,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+		case RTE_ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 			break;
-		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
 			dev->data->dev_conf.txmode.mq_mode =
-				ETH_MQ_TX_VMDQ_ONLY;
+				RTE_ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
@@ -1414,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 			return -EINVAL;
 		}
 	} else {
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
 					  " not supported.");
 			return -EINVAL;
 		}
 		/* check configuration for vmdq+dcb mode */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1429,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools must be %d or %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1446,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
 				return -EINVAL;
 			}
 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-			       conf->nb_queue_pools == ETH_32_POOLS)) {
+			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
 						" nb_queue_pools != %d and"
 						" nb_queue_pools != %d.",
-						ETH_16_POOLS, ETH_32_POOLS);
+						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
 				return -EINVAL;
 			}
 		}
 
 		/* For DCB mode check our configuration before we go further */
-		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
 
-		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-			if (!(conf->nb_tcs == ETH_4_TCS ||
-			       conf->nb_tcs == ETH_8_TCS)) {
+			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+			       conf->nb_tcs == RTE_ETH_8_TCS)) {
 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
 						" and nb_tcs != %d.",
-						ETH_4_TCS, ETH_8_TCS);
+						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
 				return -EINVAL;
 			}
 		}
@@ -1495,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* multiple queue mode checking */
 	ret  = txgbe_check_mq_mode(dev);
@@ -1694,15 +1694,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbe_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
 		goto error;
 	}
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
 		txgbe_vmdq_vlan_hw_filter_enable(dev);
 	}
@@ -1763,8 +1763,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	if (err)
 		goto error;
 
-	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-			ETH_LINK_SPEED_10G;
+	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
 
 	link_speeds = &dev->data->dev_conf.link_speeds;
 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
@@ -1773,20 +1773,20 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	}
 
 	speed = 0x0;
-	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
 	} else {
-		if (*link_speeds & ETH_LINK_SPEED_10G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_2_5G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_1G)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
-		if (*link_speeds & ETH_LINK_SPEED_100M)
+		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
 	}
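
The link_speeds bitmap decoded above is built by the application; both
accepted forms, sketched with illustrative values:

	struct rte_eth_conf conf = {0};

	/* Let the PHY autonegotiate across everything the port supports... */
	conf.link_speeds = RTE_ETH_LINK_SPEED_AUTONEG;

	/* ...or force a fixed 10G link: the FIXED flag plus one speed bit. */
	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
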
 
@@ -2601,7 +2601,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2634,11 +2634,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_desc_lim = tx_desc_lim;
 
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
 	/* Driver-preferred Rx/Tx parameters */
 	dev_info->default_rxportconf.burst_size = 32;
@@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	int wait = 1;
 
 	memset(&link, 0, sizeof(link));
-	link.link_status = ETH_LINK_DOWN;
-	link.link_speed = ETH_SPEED_NUM_NONE;
-	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-			ETH_LINK_SPEED_FIXED);
+			RTE_ETH_LINK_SPEED_FIXED);
 
 	hw->mac.get_link_status = true;
 
@@ -2713,8 +2713,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
 	if (err != 0) {
-		link.link_speed = ETH_SPEED_NUM_100M;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		return rte_eth_linkstatus_set(dev, &link);
 	}
 
@@ -2733,34 +2733,34 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	}
 
 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-	link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
 	switch (link_speed) {
 	default:
 	case TXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_100M_FULL:
-		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_speed = RTE_ETH_SPEED_NUM_100M;
 		break;
 
 	case TXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_1G;
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		break;
 
 	case TXGBE_LINK_SPEED_2_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_2_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_5GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_5G;
+		link.link_speed = RTE_ETH_SPEED_NUM_5G;
 		break;
 
 	case TXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_SPEED_NUM_10G;
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
 		break;
 	}
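
On the query side the same constants come back through the generic API; a
sketch, with port 0 and an included <rte_ethdev.h> assumed:

	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(0, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP)
		printf("link up, %u Mbps, %s\n", link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
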
 
@@ -2990,7 +2990,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 					(int)(dev->data->port_id),
 					(unsigned int)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3221,13 +3221,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		tx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
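
The matching application call for this getter, sketched (port_id is assumed
to refer to a configured port):

	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0 &&
	    fc_conf.mode == RTE_ETH_FC_FULL)
		printf("pause frames enabled in both directions\n");
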
@@ -3359,16 +3359,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
@@ -3400,16 +3400,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i += 4) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
 		if (!mask)
 			continue;
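
Callers build reta_conf[] with the same index/shift arithmetic; a sketch
that spreads all 128 entries round-robin over nb_rxq queues (port_id and
nb_rxq are assumptions):

	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;
	int ret;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift; /* entry valid */
		reta_conf[idx].reta[shift] = i % nb_rxq;     /* round-robin */
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
					  RTE_ETH_RSS_RETA_SIZE_128);
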
@@ -3576,12 +3576,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 		return -ENOTSUP;
 
 	if (on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = ~0;
 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
 		}
 	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
 			uta_info->uta_shadow[i] = 0;
 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
 		}
@@ -3605,15 +3605,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
 	uint32_t new_val = orig_val;
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		new_val |= TXGBE_POOLETHCTL_UTA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		new_val |= TXGBE_POOLETHCTL_MCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		new_val |= TXGBE_POOLETHCTL_UCHA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		new_val |= TXGBE_POOLETHCTL_BCA;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		new_val |= TXGBE_POOLETHCTL_MCP;
 
 	return new_val;
@@ -4264,15 +4264,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 
 	switch (link.link_speed) {
-	case ETH_SPEED_NUM_100M:
+	case RTE_ETH_SPEED_NUM_100M:
 		incval = TXGBE_INCVAL_100;
 		shift = TXGBE_INCVAL_SHIFT_100;
 		break;
-	case ETH_SPEED_NUM_1G:
+	case RTE_ETH_SPEED_NUM_1G:
 		incval = TXGBE_INCVAL_1GB;
 		shift = TXGBE_INCVAL_SHIFT_1GB;
 		break;
-	case ETH_SPEED_NUM_10G:
+	case RTE_ETH_SPEED_NUM_10G:
 	default:
 		incval = TXGBE_INCVAL_10GB;
 		shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4628,7 +4628,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -4639,7 +4639,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -4663,9 +4663,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4678,7 +4678,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4908,7 +4908,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
 		break;
 	default:
@@ -4939,7 +4939,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
 		break;
 	default:
@@ -4979,7 +4979,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4987,7 +4987,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
 			ret = -EINVAL;
@@ -4995,7 +4995,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5003,7 +5003,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		if (udp_tunnel->udp_port == 0) {
 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
 			ret = -EINVAL;
@@ -5035,7 +5035,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5045,7 +5045,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_VXLANPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5055,7 +5055,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_GENEVEPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_TEREDO:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5065,7 +5065,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 		}
 		wr32(hw, TXGBE_TEREDOPORT, 0);
 		break;
-	case RTE_TUNNEL_TYPE_VXLAN_GPE:
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
 		if (cur_port != udp_tunnel->udp_port) {
 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
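
These registers are reached through the generic tunnel API; the add side,
sketched with the IANA-assigned VXLAN port as an illustrative value:

	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,	/* IANA-assigned VXLAN port */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	if (rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel) != 0)
		printf("failed to register VXLAN port\n");
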
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index fd65d89ffe7d..8304b68292da 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -60,15 +60,15 @@
 #define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
 #define TXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
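
An application requesting a subset of this capability mask would configure
roughly the following (sketch; the chosen hash types are illustrative and
the driver's default RSS key is kept):

	struct rte_eth_conf conf = {
		.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS,
		.rx_adv_conf.rss_conf.rss_hf =
			RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
	};
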
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 43dc0ed39b75..283b52e8f3db 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -486,14 +486,14 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
-	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -574,22 +574,22 @@ txgbevf_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
-	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #else
-	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	}
 #endif
 
@@ -647,8 +647,8 @@ txgbevf_dev_start(struct rte_eth_dev *dev)
 	txgbevf_set_vfta_all(dev, 1);
 
 	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	err = txgbevf_vlan_offload_config(dev, mask);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
@@ -891,10 +891,10 @@ txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	int on = 0;
 
 	/* VF function only supports hw strip feature, others are not supported */
-	if (mask & ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
+			on = !!(rxq->offloads &	RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 			txgbevf_vlan_strip_queue_set(dev, i, on);
 		}
 	}
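
Applications are expected to gate such requests on the advertised
capability first; a sketch, where conf is the rte_eth_conf being prepared
for rte_eth_dev_configure():

	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
	    (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP))
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
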
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 8abb86228608..e303d87176ed 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -102,22 +102,22 @@ txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
  * flexbytes matching field, and drop queue (only for perfect matching mode).
  */
 static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf,
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
 		     uint32_t *fdirctrl, uint32_t *flex)
 {
 	*fdirctrl = 0;
 	*flex = 0;
 
 	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
+	case RTE_ETH_FDIR_PBALLOC_64K:
 		/* 8k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
 		break;
-	case RTE_FDIR_PBALLOC_128K:
+	case RTE_ETH_FDIR_PBALLOC_128K:
 		/* 16k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
 		break;
-	case RTE_FDIR_PBALLOC_256K:
+	case RTE_ETH_FDIR_PBALLOC_256K:
 		/* 32k - 1 signature filters */
 		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
 		break;
@@ -521,15 +521,15 @@ txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
 
 static uint32_t
 atr_compute_perfect_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
@@ -564,15 +564,15 @@ txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
  */
 static uint32_t
 atr_compute_signature_hash(struct txgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
+		enum rte_eth_fdir_pballoc_type pballoc)
 {
 	uint32_t bucket_hash, sig_hash;
 
 	bucket_hash = txgbe_atr_compute_hash(input,
 				TXGBE_ATR_BUCKET_HASH_KEY);
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
 		bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
 		bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
 	else
 		bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
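
The pballoc setting trades packet-buffer space for filter count, as the
comments above note; sketched from the application side (only the field
shown is assumed):

	struct rte_eth_fdir_conf fdir_conf = {0};

	/* 64 KB of packet buffer -> up to 8k - 1 signature filters;
	 * RTE_ETH_FDIR_PBALLOC_128K and _256K scale that to 16k - 1
	 * and 32k - 1 respectively. */
	fdir_conf.pballoc = RTE_ETH_FDIR_PBALLOC_64K;
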
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index eae400b14176..6d7fd1842843 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1215,7 +1215,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
 	/**
 	 * grp and e_cid_base are bit fields and only use 14 bits.
 	 * e-tag id is taken as little endian by HW.
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index ccd747973ba2..445733f3ba46 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -372,7 +372,7 @@ txgbe_crypto_create_session(void *device,
 	aead_xform = &conf->crypto_xform->aead;
 
 	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
@@ -380,7 +380,7 @@ txgbe_crypto_create_session(void *device,
 			return -ENOTSUP;
 		}
 	} else {
-		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 			ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
 		} else {
 			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
@@ -611,11 +611,11 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	/* sanity checks */
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -634,7 +634,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= TXGBE_SECRXCTL_CRCSTRIP;
 	wr32(hw, TXGBE_SECRXCTL, reg);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
 		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
 		if (reg != 0) {
@@ -642,7 +642,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
 		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
 		reg = rd32(hw, TXGBE_SECTXCTL);
 		if (reg != TXGBE_SECTXCTL_STFWD) {
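
Mirroring these checks, an inline-IPsec application has to request the
security offloads and keep LRO and KEEP_CRC off; a sketch, with conf again
the rte_eth_conf under construction:

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
	conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
	conf.rxmode.offloads &= ~(RTE_ETH_RX_OFFLOAD_TCP_LRO |
				  RTE_ETH_RX_OFFLOAD_KEEP_CRC);
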
diff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c
index a48972b1a381..30be2873307a 100644
--- a/drivers/net/txgbe/txgbe_pf.c
+++ b/drivers/net/txgbe/txgbe_pf.c
@@ -101,15 +101,15 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	memset(uta_info, 0, sizeof(struct txgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
 
-	if (vf_num >= ETH_32_POOLS) {
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
 	} else {
 		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
 
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -256,13 +256,13 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
 
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
 		break;
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
 		break;
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	}
@@ -611,29 +611,29 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of number of DCB traffic classes */
 	eth_conf = &eth_dev->data->dev_conf;
 	switch (eth_conf->txmode.mq_mode) {
-	case ETH_MQ_TX_NONE:
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
 		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n", vf,
 			eth_conf->txmode.mq_mode);
 		return -1;
 
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
-		case ETH_16_POOLS:
-			num_tcs = ETH_8_TCS;
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
 			break;
-		case ETH_32_POOLS:
-			num_tcs = ETH_4_TCS;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
 			break;
 		default:
 			return -1;
 		}
 		break;
 
-	/* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
-	case ETH_MQ_TX_VMDQ_ONLY:
+	/* RTE_ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
 		hw = TXGBE_DEV_HW(eth_dev);
 		vmvir = rd32(hw, TXGBE_POOLTAG(vf));
 		vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 7e18dcce0a86..1204dc5499a5 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1960,7 +1960,7 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
-	return DEV_RX_OFFLOAD_VLAN_STRIP;
+	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 }
 
 uint64_t
@@ -1970,34 +1970,34 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
 
-	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
-		   DEV_RX_OFFLOAD_UDP_CKSUM   |
-		   DEV_RX_OFFLOAD_TCP_CKSUM   |
-		   DEV_RX_OFFLOAD_KEEP_CRC    |
-		   DEV_RX_OFFLOAD_VLAN_FILTER |
-		   DEV_RX_OFFLOAD_RSS_HASH |
-		   DEV_RX_OFFLOAD_SCATTER;
+	offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH |
+		   RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	if (!txgbe_is_vf(dev))
-		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-			     DEV_RX_OFFLOAD_QINQ_STRIP |
-			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+		offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 
 	/*
 	 * RSC is only supported by PF devices in a non-SR-IOV
 	 * mode.
 	 */
 	if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
 	if (hw->mac.type == txgbe_mac_raptor)
-		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
 
-	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		offloads |= DEV_RX_OFFLOAD_SECURITY;
+		offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 #endif
 
 	return offloads;
@@ -2222,32 +2222,32 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	uint64_t tx_offload_capa;
 
 	tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_UDP_TSO	   |
-		DEV_TX_OFFLOAD_UDP_TNL_TSO	|
-		DEV_TX_OFFLOAD_IP_TNL_TSO	|
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO	|
-		DEV_TX_OFFLOAD_GRE_TNL_TSO	|
-		DEV_TX_OFFLOAD_IPIP_TNL_TSO	|
-		DEV_TX_OFFLOAD_GENEVE_TNL_TSO	|
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_UDP_TSO	   |
+		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (!txgbe_is_vf(dev))
-		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
-	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			   DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
-		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
 #endif
 	return tx_offload_capa;
 }
@@ -2349,7 +2349,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
-			DEV_TX_OFFLOAD_SECURITY);
+			RTE_ETH_TX_OFFLOAD_SECURITY);
 #endif
 
 	/* Modification to set tail pointer for virtual function
@@ -2599,7 +2599,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2900,20 +2900,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2930,20 +2930,20 @@ txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		mrqc &= ~TXGBE_RACTL_RSSMASK;
-		if (rss_hf & ETH_RSS_IPV4)
+		if (rss_hf & RTE_ETH_RSS_IPV4)
 			mrqc |= TXGBE_RACTL_RSSIPV4;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 			mrqc |= TXGBE_RACTL_RSSIPV4TCP;
-		if (rss_hf & ETH_RSS_IPV6 ||
-		    rss_hf & ETH_RSS_IPV6_EX)
+		if (rss_hf & RTE_ETH_RSS_IPV6 ||
+		    rss_hf & RTE_ETH_RSS_IPV6_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
-		    rss_hf & ETH_RSS_IPV6_TCP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6TCP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 			mrqc |= TXGBE_RACTL_RSSIPV4UDP;
-		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
-		    rss_hf & ETH_RSS_IPV6_UDP_EX)
+		if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+		    rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
 			mrqc |= TXGBE_RACTL_RSSIPV6UDP;
 
 		if (rss_hf)
@@ -2984,39 +2984,39 @@ txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (hw->mac.type == txgbe_mac_raptor_vf) {
 		mrqc = rd32(hw, TXGBE_VFPLCFG);
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
 			rss_hf = 0;
 	} else {
 		mrqc = rd32(hw, TXGBE_RACTL);
 		if (mrqc & TXGBE_RACTL_RSSIPV4)
-			rss_hf |= ETH_RSS_IPV4;
+			rss_hf |= RTE_ETH_RSS_IPV4;
 		if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6)
-			rss_hf |= ETH_RSS_IPV6 |
-				  ETH_RSS_IPV6_EX;
+			rss_hf |= RTE_ETH_RSS_IPV6 |
+				  RTE_ETH_RSS_IPV6_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
-				  ETH_RSS_IPV6_TCP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				  RTE_ETH_RSS_IPV6_TCP_EX;
 		if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
-			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
-				  ETH_RSS_IPV6_UDP_EX;
+			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				  RTE_ETH_RSS_IPV6_UDP_EX;
 		if (!(mrqc & TXGBE_RACTL_RSSENA))
 			rss_hf = 0;
 	}
@@ -3046,7 +3046,7 @@ txgbe_rss_configure(struct rte_eth_dev *dev)
 	 */
 	if (adapter->rss_reta_updated == 0) {
 		reta = 0;
-		for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+		for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 			if (j == dev->data->nb_rx_queues)
 				j = 0;
 			reta = (reta >> 8) | LS32(j, 24, 0xFF);
@@ -3083,12 +3083,12 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 	num_pools = cfg->nb_queue_pools;
 	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
 		txgbe_rss_disable(dev);
 		return;
 	}
 	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+	nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
 
 	/*
 	 * split rx buffer up into sections, each for 1 traffic class
@@ -3103,7 +3103,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
 
 		rxpbsize &= (~(0x3FF << 10));
@@ -3111,7 +3111,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 	}
 
-	if (num_pools == ETH_16_POOLS) {
+	if (num_pools == RTE_ETH_16_POOLS) {
 		mrqc = TXGBE_PORTCTL_NUMTC_8;
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 	} else {
@@ -3130,7 +3130,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	wr32(hw, TXGBE_POOLCTL, vt_ctl);
 
 	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		/*
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
@@ -3151,7 +3151,7 @@ txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_POOLRXENA(0),
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	wr32(hw, TXGBE_ETHADDRIDX, 0);
 	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
@@ -3221,7 +3221,7 @@ txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
 	/*PF VF Transmit Enable*/
 	wr32(hw, TXGBE_POOLTXENA(0),
 		vmdq_tx_conf->nb_queue_pools ==
-				ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+				RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
 
 	/*Configure general DCB TX parameters*/
 	txgbe_dcb_tx_hw_config(dev, dcb_config);
@@ -3237,12 +3237,12 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3252,7 +3252,7 @@ txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3270,12 +3270,12 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	uint8_t i, j;
 
 	/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
 	} else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
 	}
 
 	/* Initialize User Priority to Traffic Class mapping */
@@ -3285,7 +3285,7 @@ txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3312,7 +3312,7 @@ txgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
@@ -3339,7 +3339,7 @@ txgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	}
 
 	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
@@ -3475,7 +3475,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
 
 	switch (dev->data->dev_conf.rxmode.mq_mode) {
-	case ETH_MQ_RX_VMDQ_DCB:
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/*
@@ -3486,8 +3486,8 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/*Configure general VMDQ and DCB RX parameters*/
 		txgbe_vmdq_dcb_configure(dev);
 		break;
-	case ETH_MQ_RX_DCB:
-	case ETH_MQ_RX_DCB_RSS:
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
@@ -3500,7 +3500,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
 		dcb_config->vt_mode = true;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB and VT TX configuration parameters
@@ -3511,7 +3511,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
 		break;
 
-	case ETH_MQ_TX_DCB:
+	case RTE_ETH_MQ_TX_DCB:
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/* get DCB TX configuration parameters from rte_eth_conf */
@@ -3527,15 +3527,15 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	nb_tcs = dcb_config->num_tcs.pfc_tcs;
 	/* Unpack map */
 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
-	if (nb_tcs == ETH_4_TCS) {
+	if (nb_tcs == RTE_ETH_4_TCS) {
 		/* Avoid un-configured priority mapping to TC0 */
 		uint8_t j = 4;
 		uint8_t mask = 0xFF;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
 			mask = (uint8_t)(mask & (~(1 << map[i])));
 		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
-			if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+			if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
 				map[j++] = i;
 			mask >>= 1;
 		}
@@ -3576,7 +3576,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
 
 		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			wr32(hw, TXGBE_PBRXSIZE(i), 0);
 	}
 	if (config_dcb_tx) {
@@ -3592,7 +3592,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
 		}
 		/* Clear unused TCs, if any, to zero buffer size */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			wr32(hw, TXGBE_PBTXSIZE(i), 0);
 			wr32(hw, TXGBE_PBTXDMATH(i), 0);
 		}
@@ -3634,7 +3634,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
 
 	/* Check if the PFC is supported */
-	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
 		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/* If the TC count is 8,
@@ -3648,7 +3648,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			tc->pfc = txgbe_dcb_pfc_enabled;
 		}
 		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+		if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 			pfc_en &= 0x0F;
 		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
 	}
@@ -3719,12 +3719,12 @@ void txgbe_configure_dcb(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* check support mq_mode for DCB */
-	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+	if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+	    dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
 		return;
 
-	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
@@ -3780,7 +3780,7 @@ txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 
 	/* pool enabling for receive - 64 */
 	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
+	if (num_pools == RTE_ETH_64_POOLS)
 		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
 
 	/*
@@ -3904,11 +3904,11 @@ txgbe_config_vf_rss(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
@@ -3931,15 +3931,15 @@ txgbe_config_vf_default(struct rte_eth_dev *dev)
 	mrqc = rd32(hw, TXGBE_PORTCTL);
 	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
 	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
+	case RTE_ETH_64_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_64;
 		break;
 
-	case ETH_32_POOLS:
+	case RTE_ETH_32_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_32;
 		break;
 
-	case ETH_16_POOLS:
+	case RTE_ETH_16_POOLS:
 		mrqc |= TXGBE_PORTCTL_NUMVT_16;
 		break;
 	default:
@@ -3962,21 +3962,21 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_DCB_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_rss_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
 			txgbe_vmdq_rx_hw_configure(dev);
 			break;
 
-		case ETH_MQ_RX_NONE:
+		case RTE_ETH_MQ_RX_NONE:
 		default:
 			/* if mq_mode is none, disable rss mode.*/
 			txgbe_rss_disable(dev);
@@ -3987,18 +3987,18 @@ txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
 			txgbe_config_vf_rss(dev);
 			break;
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_DCB:
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
 		/* In SRIOV, the configuration is the same as VMDq case */
 			txgbe_vmdq_dcb_configure(dev);
 			break;
 		/* DCB/RSS together with SRIOV is not supported */
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-		case ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
 				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
@@ -4028,7 +4028,7 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV inactive scheme
 		 * any DCB w/o VMDq multi-queue setting
 		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+		if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
 			txgbe_vmdq_tx_hw_configure(hw);
 		else
 			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
@@ -4038,13 +4038,13 @@ txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 		 * SRIOV active scheme
 		 * FIXME if support DCB together with VMDq & SRIOV
 		 */
-		case ETH_64_POOLS:
+		case RTE_ETH_64_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_64;
 			break;
-		case ETH_32_POOLS:
+		case RTE_ETH_32_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_32;
 			break;
-		case ETH_16_POOLS:
+		case RTE_ETH_16_POOLS:
 			mtqc = TXGBE_PORTCTL_NUMVT_16;
 			break;
 		default:
@@ -4107,10 +4107,10 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* Sanity check */
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4118,22 +4118,22 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration */
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
-	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+	     (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
 				    "is disabled");
 		return -EINVAL;
 	}
 
 	rfctl = rd32(hw, TXGBE_PSRCTL);
-	if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		rfctl &= ~TXGBE_PSRCTL_RSCDIA;
 	else
 		rfctl |= TXGBE_PSRCTL_RSCDIA;
 	wr32(hw, TXGBE_PSRCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set PSRCTL.RSCACK bit */
@@ -4273,7 +4273,7 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_SECURITY);
+				RTE_ETH_RX_OFFLOAD_SECURITY);
 	}
 #endif
 }
@@ -4316,7 +4316,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
 	else
 		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4344,7 +4344,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4354,7 +4354,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
@@ -4391,11 +4391,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4410,7 +4410,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = rd32(hw, TXGBE_PSRCTL);
 	rxcsum |= TXGBE_PSRCTL_PCSD;
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= TXGBE_PSRCTL_L4CSUM;
 	else
 		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
@@ -4419,7 +4419,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	if (hw->mac.type == txgbe_mac_raptor) {
 		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
-		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
 		else
 			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
@@ -4542,8 +4542,8 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 		txgbe_setup_loopback_link_raptor(hw);
 
 #ifdef RTE_LIB_SECURITY
-	if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
-	    (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
+	    (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
 		ret = txgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
@@ -4851,7 +4851,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * Assume no header split and no VLAN strip support
 	 * on any Rx queue first.
 	 */
-	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	/* Set PSR type for VF RSS according to max Rx queue */
 	psrtype = TXGBE_VFPLCFG_PSRL4HDR |
@@ -4903,7 +4903,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		 */
 		wr32(hw, TXGBE_RXCFG(i), srrctl);
 
-		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
 		    (dev->data->mtu + TXGBE_ETH_OVERHEAD +
 				2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
@@ -4912,8 +4912,8 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 			dev->data->scattered_rx = 1;
 		}
 
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 	/*
@@ -5084,7 +5084,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
 		if (j == conf->conf.queue_num)
 			j = 0;
 		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
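
Reading the active hash types back uses the same flags; a sketch with
rss_key left NULL so only rss_hf is filled in (port_id assumed):

	struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };

	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0 &&
	    (rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP))
		printf("IPv4/UDP RSS hashing is active\n");
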
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b96f58a3f848..27d4c842c0e7 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -309,7 +309,7 @@ struct txgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
-	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	uint64_t	    offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
@@ -392,7 +392,7 @@ struct txgbe_tx_queue {
 	uint8_t             pthresh;       /**< Prefetch threshold register. */
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint64_t            offloads; /* Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 3abe3959eb1a..3171be73d05d 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -118,14 +118,14 @@ txgbe_tc_nb_get(struct rte_eth_dev *dev)
 	uint8_t nb_tcs = 0;
 
 	eth_conf = &dev->data->dev_conf;
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
+		    RTE_ETH_32_POOLS)
+			nb_tcs = RTE_ETH_4_TCS;
 		else
-			nb_tcs = ETH_8_TCS;
+			nb_tcs = RTE_ETH_8_TCS;
 	} else {
 		nb_tcs = 1;
 	}
@@ -364,10 +364,10 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 	if (vf_num) {
 		/* no DCB */
 		if (nb_tcs == 1) {
-			if (vf_num >= ETH_32_POOLS) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
 				*nb = 2;
 				*base = vf_num * 2;
-			} else if (vf_num >= ETH_16_POOLS) {
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
 				*nb = 4;
 				*base = vf_num * 4;
 			} else {
@@ -381,7 +381,7 @@ txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 		}
 	} else {
 		/* VT off */
-		if (nb_tcs == ETH_8_TCS) {
+		if (nb_tcs == RTE_ETH_8_TCS) {
 			switch (tc_node_no) {
 			case 0:
 				*base = 0;
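
For context on the hunk above: the pool-count enums are plain numeric values (RTE_ETH_16_POOLS == 16, RTE_ETH_32_POOLS == 32), so the queue ranges computed in txgbe_queue_base_nb_get() follow from dividing the device's queue space by the pool count. A minimal sketch, assuming a 128-queue device; the helper below is hypothetical and only illustrates the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	#define TOTAL_QUEUES 128 /* assumption for illustration */

	/* Hypothetical helper: queue range owned by one VMDq pool. */
	static void
	pool_queue_range(uint16_t nb_pools, uint16_t pool,
			 uint16_t *base, uint16_t *nb)
	{
		*nb = TOTAL_QUEUES / nb_pools; /* 64 pools -> 2 queues each */
		*base = pool * *nb;
	}

	int
	main(void)
	{
		uint16_t base, nb;

		pool_queue_range(64 /* RTE_ETH_64_POOLS */, 5, &base, &nb);
		printf("pool 5 owns queues %u..%u\n", base, base + nb - 1);
		return 0;
	}
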
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 86498365e149..17b6a1a1ceec 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -125,8 +125,8 @@ static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static struct rte_eth_link pmd_link = {
 		.link_speed = 10000,
-		.link_duplex = ETH_LINK_FULL_DUPLEX,
-		.link_status = ETH_LINK_DOWN
+		.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+		.link_status = RTE_ETH_LINK_DOWN
 };
 
 struct rte_vhost_vring_state {
@@ -817,7 +817,7 @@ new_device(int vid)
 
 	rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);
@@ -852,7 +852,7 @@ destroy_device(int vid)
 	rte_atomic32_set(&internal->dev_attached, 0);
 	update_queuing_status(eth_dev);
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
 		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1118,7 +1118,7 @@ eth_dev_configure(struct rte_eth_dev *dev)
 	if (vhost_driver_setup(dev) < 0)
 		return -1;
 
-	internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -1267,9 +1267,9 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = internal->max_queues;
 	dev_info->min_rx_bufsize = 0;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				DEV_TX_OFFLOAD_VLAN_INSERT;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
 	return 0;
 }
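
Applications consuming these capability fields only need the same s/DEV_/RTE_ETH_/ rename. A minimal usage sketch, assuming an already-probed port_id:

	#include <rte_ethdev.h>

	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = { 0 };
	int ret;

	/* Enable VLAN stripping only if the PMD reports support for it. */
	if (rte_eth_dev_info_get(port_id, &info) == 0 &&
	    (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP))
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
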
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ddf0e26ab4db..94120b349023 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -712,7 +712,7 @@ int
 virtio_dev_close(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1774,7 +1774,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
-	if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+	if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
 		if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
 			config = &local_config;
 			virtio_read_dev_config(hw,
@@ -1788,7 +1788,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		}
 	}
 	if (hw->duplex == DUPLEX_UNKNOWN)
-		hw->duplex = ETH_LINK_FULL_DUPLEX;
+		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
 		hw->speed, hw->duplex);
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
@@ -1887,7 +1887,7 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
 	int vectorized = 0;
 	int ret;
 
@@ -1958,22 +1958,22 @@ static uint32_t
 virtio_dev_speed_capa_get(uint32_t speed)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
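
Both constant families are renamed together here: RTE_ETH_SPEED_NUM_* are numeric Mb/s values, while RTE_ETH_LINK_SPEED_* are one-hot capability bits, which is why this switch exists at all. ethdev also provides a generic mapping helper; a sketch of equivalent usage (from the rte_eth_speed_bitflag() API):

	#include <rte_ethdev.h>

	/* Map a numeric speed to its capability bit; equivalent in spirit
	 * to virtio_dev_speed_capa_get() above for the speeds it covers. */
	uint32_t capa = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_25G,
					      RTE_ETH_LINK_FULL_DUPLEX);
	/* capa == RTE_ETH_LINK_SPEED_25G */
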
@@ -2089,14 +2089,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "configure");
 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Rx multi queue mode %d",
 			rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported Tx multi queue mode %d",
 			txmode->mq_mode);
@@ -2114,20 +2114,20 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 
-	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
-			   DEV_TX_OFFLOAD_TCP_CKSUM))
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
 
-	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 		req_features |=
 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
@@ -2139,15 +2139,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
-	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
 		PMD_DRV_LOG(ERR,
 			"rx checksum not available on this host");
 		return -ENOTSUP;
 	}
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
 		PMD_DRV_LOG(ERR,
@@ -2159,12 +2159,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
 		virtio_dev_cq_start(dev);
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		hw->vlan_strip = 1;
 
-	hw->rx_ol_scatter = (rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 		PMD_DRV_LOG(ERR,
 			    "vlan filtering not available on this host");
@@ -2217,7 +2217,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 				PMD_DRV_LOG(INFO,
 					"disabled packed ring vectorized rx for TCP_LRO enabled");
 				hw->use_vec_rx = 0;
@@ -2244,10 +2244,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 				hw->use_vec_rx = 0;
 			}
 
-			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_CKSUM |
-					   DEV_RX_OFFLOAD_TCP_LRO |
-					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
+					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
 				PMD_DRV_LOG(INFO,
 					"disabled split ring vectorized rx for offloading enabled");
 				hw->use_vec_rx = 0;
@@ -2440,7 +2440,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct rte_eth_link link;
-	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 	dev->data->dev_started = 0;
@@ -2481,28 +2481,28 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 	memset(&link, 0, sizeof(link));
 	link.link_duplex = hw->duplex;
 	link.link_speed  = hw->speed;
-	link.link_autoneg = ETH_LINK_AUTONEG;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
 
 	if (!hw->started) {
-		link.link_status = ETH_LINK_DOWN;
-		link.link_speed = ETH_SPEED_NUM_NONE;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
 		virtio_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
 				&status, sizeof(status));
 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
-			link.link_status = ETH_LINK_DOWN;
-			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 			PMD_INIT_LOG(DEBUG, "Port %d is down",
 				     dev->data->port_id);
 		} else {
-			link.link_status = ETH_LINK_UP;
+			link.link_status = RTE_ETH_LINK_UP;
 			PMD_INIT_LOG(DEBUG, "Port %d is up",
 				     dev->data->port_id);
 		}
 	} else {
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -2515,8 +2515,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct virtio_hw *hw = dev->data->dev_private;
 	uint64_t offloads = rxmode->offloads;
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
 
 			PMD_DRV_LOG(NOTICE,
@@ -2526,8 +2526,8 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		}
 	}
 
-	if (mask & ETH_VLAN_STRIP_MASK)
-		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
+		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
 	return 0;
 }
@@ -2549,32 +2549,32 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = hw->max_mtu;
 
 	host_features = VIRTIO_OPS(hw)->get_features(hw);
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
 		dev_info->rx_offload_capa |=
-			DEV_RX_OFFLOAD_TCP_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM;
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
 	}
 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-				    DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
 		dev_info->tx_offload_capa |=
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 	}
 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
 	if ((host_features & tso_mask) == tso_mask)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
 		/*
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index a19895af1f17..26d9edf5319c 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -41,20 +41,20 @@
 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
 
 #define VMXNET3_TX_OFFLOAD_CAP		\
-	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
-	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_TX_OFFLOAD_TCP_TSO |	\
-	 DEV_TX_OFFLOAD_MULTI_SEGS)
+	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
+	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 
 #define VMXNET3_RX_OFFLOAD_CAP		\
-	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
-	 DEV_RX_OFFLOAD_VLAN_FILTER |   \
-	 DEV_RX_OFFLOAD_SCATTER |	\
-	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
-	 DEV_RX_OFFLOAD_TCP_LRO |	\
-	 DEV_RX_OFFLOAD_RSS_HASH)
+	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |	\
+	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
+	 RTE_ETH_RX_OFFLOAD_SCATTER |	\
+	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
+	 RTE_ETH_RX_OFFLOAD_TCP_LRO |	\
+	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 int vmxnet3_segs_dynfield_offset = -1;
 
@@ -398,9 +398,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* set the initial link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(eth_dev, &link);
 
 	return 0;
@@ -486,8 +486,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
@@ -547,7 +547,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	hw->queueDescPA = mz->iova;
 	hw->queue_desc_len = (uint16_t)size;
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Allocate memory structure for UPT1_RSSConf and configure */
 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
 				      "rss_conf", rte_socket_id(),
@@ -843,15 +843,15 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
 		devRead->misc.maxNumRxSG = 0;
 	}
 
-	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		ret = vmxnet3_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS)
 			return ret;
@@ -863,7 +863,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	}
 
 	ret = vmxnet3_dev_vlan_offload_set(dev,
-			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -930,7 +930,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 	}
 
 	if (VMXNET3_VERSION_GE_4(hw) &&
-	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
 		/* Check for additional RSS */
 		ret = vmxnet3_v4_rss_configure(dev);
 		if (ret != VMXNET3_SUCCESS) {
@@ -1039,9 +1039,9 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 	rte_eth_linkstatus_set(dev, &link);
 
 	hw->adapter_stopped = 1;
@@ -1365,7 +1365,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
 	dev_info->min_mtu = VMXNET3_MIN_MTU;
 	dev_info->max_mtu = VMXNET3_MAX_MTU;
-	dev_info->speed_capa = ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
 
 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -1447,10 +1447,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (ret & 0x1)
-		link.link_status = ETH_LINK_UP;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	link.link_speed = ETH_SPEED_NUM_10G;
-	link.link_autoneg = ETH_LINK_FIXED;
+		link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	link.link_speed = RTE_ETH_SPEED_NUM_10G;
+	link.link_autoneg = RTE_ETH_LINK_FIXED;
 
 	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1503,7 +1503,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1573,8 +1573,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1583,8 +1583,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 				       VMXNET3_CMD_UPDATE_FEATURE);
 	}
 
-	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 8950175460f0..ef858ac9512f 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -32,18 +32,18 @@
 				VMXNET3_MAX_RX_QUEUES + 1)
 
 #define VMXNET3_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 #define VMXNET3_V4_RSS_MASK ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP)
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #define VMXNET3_MANDATORY_V4_RSS ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP)
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 
 /* RSS configuration structure - shared with device through GPA */
 typedef struct VMXNET3_RSSConf {
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index b01c4c01f9c9..870100fa4f11 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1326,13 +1326,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	rss_hf = port_rss_conf->rss_hf &
 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1389,13 +1389,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
 	/* loading hashType */
 	dev_rss_conf->hashType = 0;
 	rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-	if (rss_hf & ETH_RSS_IPV4)
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-	if (rss_hf & ETH_RSS_IPV6)
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
 	return VMXNET3_SUCCESS;
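
Applications request these hash types the same way after the rename. A minimal sketch, assuming a probed port_id and masking against what the port actually supports; the function name is hypothetical:

	#include <rte_ethdev.h>

	static int
	configure_tcp_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_dev_info info;
		struct rte_eth_conf conf = {
			.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		};
		int ret = rte_eth_dev_info_get(port_id, &info);

		if (ret != 0)
			return ret;
		/* Ask for TCP RSS, limited to what the port supports. */
		conf.rx_adv_conf.rss_conf.rss_hf =
			(RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			 RTE_ETH_RSS_NONFRAG_IPV6_TCP) &
			info.flow_type_rss_offloads;
		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	}
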
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index a26076b312e5..ecafc5e4f1a9 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -70,11 +70,11 @@ mbuf_input(struct rte_mbuf *mbuf)
 
 static const struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -327,7 +327,7 @@ check_port_link_status(uint16_t port_id)
 
 		if (link_get_err >= 0 && link.link_status) {
 			const char *dp = (link.link_duplex ==
-				ETH_LINK_FULL_DUPLEX) ?
+				RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex";
 			printf("\nPort %u Link Up - speed %s - %s\n",
 				port_id,
diff --git a/examples/bond/main.c b/examples/bond/main.c
index fd8fd767c811..1087b0dad125 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -114,17 +114,17 @@ static struct rte_mempool *mbuf_pool;
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -148,9 +148,9 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
@@ -240,9 +240,9 @@ bond_port_init(struct rte_mempool *mbuf_pool)
 			"Error during getting device (port %u) info: %s\n",
 			BOND_PORT, strerror(-retval));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
 	if (retval != 0)
 		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 8c4a8feec0c2..c681e237ea46 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -80,15 +80,15 @@ struct app_stats prev_app_stats;
 
 static const struct rte_eth_conf port_conf_default = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		}
 	},
 };
@@ -126,9 +126,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 1bc675962bf3..cdd9e9b60bd8 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -98,7 +98,7 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
 	int ret;
 
 	memset(&cfg_port, 0, sizeof(cfg_port));
-	cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
+	cfg_port.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
 
 	for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
 		struct app_port *ptr_port = &app_cfg->ports[idx_port];
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 413251630709..e7cdf8d5775b 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -233,13 +233,13 @@ rte_ethtool_get_pauseparam(uint16_t port_id,
 	pause_param->tx_pause = 0;
 	pause_param->rx_pause = 0;
 	switch (fc_conf.mode) {
-	case RTE_FC_RX_PAUSE:
+	case RTE_ETH_FC_RX_PAUSE:
 		pause_param->rx_pause = 1;
 		break;
-	case RTE_FC_TX_PAUSE:
+	case RTE_ETH_FC_TX_PAUSE:
 		pause_param->tx_pause = 1;
 		break;
-	case RTE_FC_FULL:
+	case RTE_ETH_FC_FULL:
 		pause_param->rx_pause = 1;
 		pause_param->tx_pause = 1;
 	default:
@@ -277,14 +277,14 @@ rte_ethtool_set_pauseparam(uint16_t port_id,
 
 	if (pause_param->tx_pause) {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_FULL;
+			fc_conf.mode = RTE_ETH_FC_FULL;
 		else
-			fc_conf.mode = RTE_FC_TX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
 	} else {
 		if (pause_param->rx_pause)
-			fc_conf.mode = RTE_FC_RX_PAUSE;
+			fc_conf.mode = RTE_ETH_FC_RX_PAUSE;
 		else
-			fc_conf.mode = RTE_FC_NONE;
+			fc_conf.mode = RTE_ETH_FC_NONE;
 	}
 
 	status = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
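
The two pause directions combine into the renamed enum as a simple truth table. A hypothetical helper capturing the mapping used by both functions above:

	#include <stdbool.h>
	#include <rte_ethdev.h>

	/* tx/rx pause flags -> flow-control mode (illustrative only). */
	static enum rte_eth_fc_mode
	fc_mode_from_pause(bool tx_pause, bool rx_pause)
	{
		if (tx_pause && rx_pause)
			return RTE_ETH_FC_FULL;
		if (tx_pause)
			return RTE_ETH_FC_TX_PAUSE;
		if (rx_pause)
			return RTE_ETH_FC_RX_PAUSE;
		return RTE_ETH_FC_NONE;
	}
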
@@ -398,12 +398,12 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
 	for (vf = 0; vf < num_vfs; vf++) {
 #ifdef RTE_NET_IXGBE
 		rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
-			ETH_VMDQ_ACCEPT_UNTAG, 0);
+			RTE_ETH_VMDQ_ACCEPT_UNTAG, 0);
 #endif
 	}
 
 	/* Enable Rx VLAN filter; unsupported VF status is discarded */
-	ret = rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
+	ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
 	if (ret != 0)
 		return ret;
 
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index e26be8edf28f..193a16463449 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -283,13 +283,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -311,12 +311,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 476b147bdfcc..1b841d46ad93 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -614,13 +614,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	struct rte_eth_rxconf rx_conf;
 	static const struct rte_eth_conf port_conf_default = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
-				.rss_hf = ETH_RSS_IP |
-					  ETH_RSS_TCP |
-					  ETH_RSS_UDP,
+				.rss_hf = RTE_ETH_RSS_IP |
+					  RTE_ETH_RSS_TCP |
+					  RTE_ETH_RSS_UDP,
 			}
 		}
 	};
@@ -642,9 +642,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	rx_conf = dev_info.default_rxconf;
 	rx_conf.offloads = port_conf.rxmode.offloads;
 
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index 8a43f6ac0f92..6185b340600c 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -212,9 +212,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
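
Since this capability test recurs in nearly every example, a short reminder of the semantics: RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE lets the PMD return transmitted mbufs straight to their mempool without per-mbuf checks, which is only safe when all Tx mbufs on a queue come from one pool with refcnt 1, hence the opt-in pattern gated on the reported capability:

	/* Opt in only when reported; requires single-pool, refcnt==1 mbufs. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
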
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index dd8a33d036ee..bfc1949c8428 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -113,7 +113,7 @@ assert_link_status(void)
 	memset(&link, 0, sizeof(link));
 	do {
 		link_get_err = rte_eth_link_get(port_id, &link);
-		if (link_get_err == 0 && link.link_status == ETH_LINK_UP)
+		if (link_get_err == 0 && link.link_status == RTE_ETH_LINK_UP)
 			break;
 		rte_delay_ms(CHECK_INTERVAL);
 	} while (--rep_cnt);
@@ -121,7 +121,7 @@ assert_link_status(void)
 	if (link_get_err < 0)
 		rte_exit(EXIT_FAILURE, ":: error: link get is failing: %s\n",
 			 rte_strerror(-link_get_err));
-	if (link.link_status == ETH_LINK_DOWN)
+	if (link.link_status == RTE_ETH_LINK_DOWN)
 		rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
 }
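
Worth noting while touching these call sites: RTE_ETH_LINK_UP and RTE_ETH_LINK_DOWN keep their 1/0 values, so boolean tests on link_status stay valid. A minimal sketch of the non-blocking variant, assuming a started port_id:

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	print_link(uint16_t port_id)
	{
		struct rte_eth_link link;

		if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
		    link.link_status == RTE_ETH_LINK_UP)
			printf("port %u up, %u Mb/s\n",
			       port_id, link.link_speed);
	}
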
 
@@ -138,12 +138,12 @@ init_port(void)
 		},
 		.txmode = {
 			.offloads =
-				DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO,
+				RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+				RTE_ETH_TX_OFFLOAD_TCP_TSO,
 		},
 	};
 	struct rte_eth_txconf txq_conf;
diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index ccfee585f850..b1aa2767a0af 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -819,12 +819,12 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	/* Configuring port to use RSS for multiple RX queues. 8< */
 	static const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_PROTO_MASK,
+				.rss_hf = RTE_ETH_RSS_PROTO_MASK,
 			}
 		}
 	};
@@ -852,9 +852,9 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device:"
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index d51133199c42..4ffe997baf23 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -148,13 +148,13 @@ static struct rte_eth_conf port_conf = {
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
-			     DEV_RX_OFFLOAD_SCATTER),
+		.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+			     RTE_ETH_RX_OFFLOAD_SCATTER),
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -623,7 +623,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
index 9ba02e687adb..0290767af473 100644
--- a/examples/ip_pipeline/link.c
+++ b/examples/ip_pipeline/link.c
@@ -45,7 +45,7 @@ link_next(struct link *link)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -57,12 +57,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -77,11 +77,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -139,7 +139,7 @@ link_create(const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -157,9 +157,9 @@ link_create(const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -267,5 +267,5 @@ link_is_up(const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
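
The RETA update pattern above generalizes: entries are grouped 64 per rte_eth_rss_reta_entry64, so index i splits into group i / RTE_ETH_RETA_GROUP_SIZE and slot i % RTE_ETH_RETA_GROUP_SIZE. A self-contained sketch of the same round-robin fill (assumes a configured port_id and a reta_size of at most 512 entries; the function name is hypothetical):

	#include <stdint.h>
	#include <string.h>
	#include <rte_ethdev.h>

	static int
	rss_reta_round_robin(uint16_t port_id, uint16_t reta_size,
			     uint32_t n_queues)
	{
		struct rte_eth_rss_reta_entry64 reta_conf[
			RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
		uint32_t i;

		memset(reta_conf, 0, sizeof(reta_conf));
		for (i = 0; i < reta_size; i++) {
			reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask =
				UINT64_MAX;
			reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[
				i % RTE_ETH_RETA_GROUP_SIZE] = i % n_queues;
		}
		return rte_eth_dev_rss_reta_update(port_id, reta_conf,
						   reta_size);
	}
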
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 06dc42799314..41e35593867b 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -160,22 +160,22 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
 			RTE_ETHER_CRC_LEN,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_MULTI_SEGS),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -737,7 +737,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -1095,9 +1095,9 @@ main(int argc, char **argv)
 		n_tx_queue = nb_lcores;
 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index a10e330f5003..1c60ac28e317 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -233,19 +233,19 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1444,10 +1444,10 @@ print_usage(const char *prgname)
 		"               \"parallel\" : Parallel\n"
 		"  --" CMD_LINE_OPT_RX_OFFLOAD
 		": bitmask of the RX HW offload capabilities to enable/use\n"
-		"                         (DEV_RX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_TX_OFFLOAD
 		": bitmask of the TX HW offload capabilities to enable/use\n"
-		"                         (DEV_TX_OFFLOAD_*)\n"
+		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
 		": max number of entries in reassemble(fragment) table\n"
 		"    (zero (default value) disables reassembly)\n"
@@ -1898,7 +1898,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2201,8 +2201,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 	local_port_conf.rxmode.mtu = mtu_size;
 
 	if (multi_seg_required()) {
-		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 	}
 
 	local_port_conf.rxmode.offloads |= req_rx_offloads;
@@ -2225,12 +2225,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 			portid, local_port_conf.txmode.offloads,
 			dev_info.tx_offload_capa);
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
-		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
 	printf("port %u configurng rx_offloads=0x%" PRIx64
 		", tx_offloads=0x%" PRIx64 "\n",
@@ -2288,7 +2288,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 		/* Pre-populate pkt offloads based on capabilities */
 		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
 		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
-		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
 			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
 
 		tx_queueid++;
@@ -2649,7 +2649,7 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
 	struct rte_flow *flow;
 	int ret;
 
-	if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
 		return;
 
 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 17a28556c971..5cdd794f017f 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -986,7 +986,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	if (inbound) {
 		if ((dev_info.rx_offload_capa &
-				DEV_RX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware RX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -994,7 +994,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
 	} else { /* outbound */
 		if ((dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_SECURITY) == 0) {
+				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
 			RTE_LOG(WARNING, PORT,
 				"hardware TX IPSec offload is not supported\n");
 			return -EINVAL;
@@ -1628,7 +1628,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
 	}
 
 	/* Check for outbound rules that use offloads and use this port */
@@ -1639,7 +1639,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 				rule_type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 				&& rule->portid == port_id)
-			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
 	}
 	return 0;
 }
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 73391ce1a96d..bdcaa3bcd1ca 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -114,8 +114,8 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	},
 };
 
@@ -619,7 +619,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 69a0afced6cc..d324ee224109 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -94,7 +94,7 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
 /* Options for configuring ethernet port */
 static struct rte_eth_conf port_conf = {
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -607,9 +607,9 @@ init_port(uint16_t port)
 			"Error during getting device (port %u) info: %s\n",
 			port, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
@@ -687,7 +687,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 6e2016752fca..04a3bdace20c 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -215,11 +215,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1807,7 +1807,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2631,9 +2631,9 @@ initialize_ports(struct l2fwd_crypto_options *options)
 			return retval;
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (retval < 0) {
 			printf("Cannot configure device: err=%d, port=%u\n",
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index 9040be5ed9b6..cf3d1b8aaf40 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -14,7 +14,7 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 			.split_hdr_size = 0,
 		},
 		.txmode = {
-			.mq_mode = ETH_MQ_TX_NONE,
+			.mq_mode = RTE_ETH_MQ_TX_NONE,
 		},
 	};
 	uint16_t nb_ports_available = 0;
@@ -22,9 +22,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 	int ret;
 
 	if (rsrc->event_mode) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-		port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+		port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 	}
 
 	/* Initialise each port */
@@ -60,9 +60,9 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
 		}
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queue. 8< */
 		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 1db89f2bd139..9806204b81d1 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -395,7 +395,7 @@ check_all_ports_link_status(struct l2fwd_resources *rsrc,
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 62981663ea78..d8eabe4c869e 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -93,7 +93,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -725,7 +725,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -868,9 +868,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index af59d51b3ec4..78fc48f781fc 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -82,7 +82,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -477,7 +477,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -649,9 +649,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE,
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 8feb50e0f542..c9d8d4918a34 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -94,7 +94,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -605,7 +605,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -791,9 +791,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure the number of queues for a port. */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 410ec94b4131..1fb180723582 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -123,19 +123,19 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
-				ETH_RSS_TCP | ETH_RSS_SCTP,
+			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -1935,7 +1935,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2003,7 +2003,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
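
The arithmetic here is worth spelling out: overhead_len is the L2 framing excluded from the MTU. For example, with max_pkt_len = 9000 and overhead_len = RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 18, the configured MTU is 8982; since 8982 > RTE_ETHER_MTU (1500), RTE_ETH_TX_OFFLOAD_MULTI_SEGS is enabled so jumbo frames can be sent as chained mbufs. (The untagged 14 + 4 byte split is an assumption for the example, not something this hunk changes.)
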
@@ -2087,9 +2087,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 05385807e83e..7f00c65609ed 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,17 +111,17 @@ static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -607,7 +607,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* Clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -731,7 +731,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -828,9 +828,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 39624993b081..21c79567b1f7 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -249,18 +249,18 @@ uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_RSS,
+		.mq_mode        = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_UDP,
+			.rss_hf = RTE_ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	}
 };
 
@@ -2196,7 +2196,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -2509,7 +2509,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -2637,9 +2637,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 961860ea18ef..7c7613a83aad 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -75,9 +75,9 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
 			rte_panic("Error during getting device (port %u) info:"
 				  "%s\n", port_id, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+						RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 						dev_info.flow_type_rss_offloads;
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 202ef78b6e95..5dd3e4136ea1 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -119,18 +119,18 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -902,7 +902,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -987,7 +987,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -1052,15 +1052,15 @@ l3fwd_poll_resource_setup(void)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
 
 		if (dev_info.max_rx_queues == 1)
-			local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 
 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index ce8ae059d789..551f0524da79 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -82,7 +82,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.intr_conf = {
 		.lsc = 1, /**< lsc interrupt feature enabled */
@@ -146,7 +146,7 @@ print_stats(void)
 			   link_get_err < 0 ? "0" :
 			   rte_eth_link_speed_to_str(link.link_speed),
 			   link_get_err < 0 ? "Link get failed" :
-			   (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+			   (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 					"full-duplex" : "half-duplex"),
 			   port_statistics[portid].tx,
 			   port_statistics[portid].rx,
@@ -506,7 +506,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -633,9 +633,9 @@ main(int argc, char **argv)
 				"Error during getting device (port %u) info: %s\n",
 				portid, strerror(-ret));
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 		/* Configure RX and TX queues. 8< */
 		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 		if (ret < 0)
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index be669c2bcc06..a4d7a3e5436a 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -93,7 +93,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	const struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS
+			.mq_mode = RTE_ETH_MQ_RX_RSS
 		}
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_clients;
@@ -212,7 +212,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index a66328ba0caf..b35886a77b00 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -175,18 +175,18 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 {
 	struct rte_eth_conf port_conf = {
 			.rxmode = {
-				.mq_mode	= ETH_MQ_RX_RSS,
+				.mq_mode	= RTE_ETH_MQ_RX_RSS,
 				.split_hdr_size = 0,
-				.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+				.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 			},
 			.rx_adv_conf = {
 				.rss_conf = {
 					.rss_key = NULL,
-					.rss_hf = ETH_RSS_IP,
+					.rss_hf = RTE_ETH_RSS_IP,
 				},
 			},
 			.txmode = {
-				.mq_mode = ETH_MQ_TX_NONE,
+				.mq_mode = RTE_ETH_MQ_TX_NONE,
 			}
 	};
 	const uint16_t rx_rings = num_queues, tx_rings = num_queues;
@@ -217,9 +217,9 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
 
 	info.default_rxconf.rx_drop_en = 1;
 
-	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
@@ -391,7 +391,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
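
As an aside, the rss_hf masking pattern seen in symmetric_mp and the other examples can be factored out as below. This is a sketch only, not part of the patch; clamp_rss_hf is a hypothetical helper name:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static int
clamp_rss_hf(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	uint64_t requested = conf->rx_adv_conf.rss_conf.rss_hf;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	/* Keep only the hash types the hardware can actually compute. */
	conf->rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
	if (conf->rx_adv_conf.rss_conf.rss_hf != requested)
		printf("Port %u: rss_hf 0x%" PRIx64 " reduced to 0x%" PRIx64 "\n",
		       port_id, requested, conf->rx_adv_conf.rss_conf.rss_hf);
	return 0;
}
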
diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
index e9a388710647..f110fc129f55 100644
--- a/examples/ntb/ntb_fwd.c
+++ b/examples/ntb/ntb_fwd.c
@@ -89,17 +89,17 @@ static uint16_t pkt_burst = NTB_DFLT_PKT_BURST;
 
 static struct rte_eth_conf eth_port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 4f6982bc1289..b01ac60fd196 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -294,9 +294,9 @@ configure_eth_port(uint16_t port_id)
 		return ret;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
 	if (ret != 0)
 		return ret;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 74e016e1d20d..3a6a33bda3b0 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -306,18 +306,18 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_RSS,
+		.mq_mode = RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_TCP,
+			.rss_hf = RTE_ETH_RSS_TCP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -3437,7 +3437,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -3490,7 +3490,7 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
 	conf->rxmode.mtu = max_pkt_len - overhead_len;
 
 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
-		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	return 0;
 }
@@ -3589,9 +3589,9 @@ main(int argc, char **argv)
 				"Invalid max packet length: %u (port %u)\n",
 				max_pkt_len, portid);
 
-		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 			local_port_conf.txmode.offloads |=
-				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 			dev_info.flow_type_rss_offloads;
diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c
index 4f20dfc4be06..569207a79d62 100644
--- a/examples/pipeline/obj.c
+++ b/examples/pipeline/obj.c
@@ -133,7 +133,7 @@ mempool_find(struct obj *obj, const char *name)
 static struct rte_eth_conf port_conf_default = {
 	.link_speeds = 0,
 	.rxmode = {
-		.mq_mode = ETH_MQ_RX_NONE,
+		.mq_mode = RTE_ETH_MQ_RX_NONE,
 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
 		.split_hdr_size = 0, /* Header split buffer size */
 	},
@@ -145,12 +145,12 @@ static struct rte_eth_conf port_conf_default = {
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.lpbk_mode = 0,
 };
 
-#define RETA_CONF_SIZE     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE     (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
 
 static int
 rss_setup(uint16_t port_id,
@@ -165,11 +165,11 @@ rss_setup(uint16_t port_id,
 	memset(reta_conf, 0, sizeof(reta_conf));
 
 	for (i = 0; i < reta_size; i++)
-		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
 	for (i = 0; i < reta_size; i++) {
-		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
-		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
 		uint32_t rss_qs_pos = i % rss->n_queues;
 
 		reta_conf[reta_id].reta[reta_pos] =
@@ -227,7 +227,7 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	rss = params->rx.rss;
 	if (rss) {
 		if ((port_info.reta_size == 0) ||
-			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
 			return NULL;
 
 		if ((rss->n_queues == 0) ||
@@ -245,9 +245,9 @@ link_create(struct obj *obj, const char *name, struct link_params *params)
 	/* Port */
 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
 	if (rss) {
-		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 		port_conf.rx_adv_conf.rss_conf.rss_hf =
-			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
 			port_info.flow_type_rss_offloads;
 	}
 
@@ -356,7 +356,7 @@ link_is_up(struct obj *obj, const char *name)
 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
 		return 0;
 
-	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
 }
 
 struct link *
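
The rss_setup() change above exercises the renamed RTE_ETH_RETA_GROUP_SIZE: each rte_eth_rss_reta_entry64 covers 64 redirection-table slots, so an entry index splits into a group (i / 64) and a slot (i % 64). A minimal sketch, not part of the patch, assuming reta_size <= 512 and n_queues > 0 (both normally taken from dev_info); reta_round_robin is a hypothetical name:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
reta_round_robin(uint16_t port_id, uint16_t reta_size, uint16_t n_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint32_t id  = i / RTE_ETH_RETA_GROUP_SIZE; /* 64-entry group */
		uint32_t pos = i % RTE_ETH_RETA_GROUP_SIZE; /* slot in group */

		reta_conf[id].mask |= UINT64_C(1) << pos;   /* mark slot valid */
		reta_conf[id].reta[pos] = i % n_queues;     /* round-robin queue */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
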
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 229a277032cb..979d9eb9e9d0 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -193,14 +193,14 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Force full Tx path in the driver, required for IEEE1588 */
-	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index c32d2e12e633..743bae2da50a 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -51,18 +51,18 @@ static struct rte_mempool *pool = NULL;
  ***/
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode	= RTE_ETH_MQ_RX_RSS,
 		.split_hdr_size = 0,
-		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
+		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = RTE_ETH_RSS_IP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -332,8 +332,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_rx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
@@ -378,8 +378,8 @@ main(int argc, char **argv)
 			"Error during getting device (port %u) info: %s\n",
 			port_tx, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
 	if (conf.rx_adv_conf.rss_conf.rss_hf !=
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 1367569c65db..9b34e4a76b1b 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -60,7 +60,7 @@ static struct rte_eth_conf port_conf = {
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 };
 
@@ -105,9 +105,9 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
 			"Error during getting device (port %u) info: %s\n",
 			portid, strerror(-ret));
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE,
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 6845c396b8d9..1903d8b095a1 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -141,17 +141,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	if (hw_timestamping) {
-		if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
+		if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
 			printf("\nERROR: Port %u does not support hardware timestamping\n"
 					, port);
 			return -1;
 		}
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 		rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
 		if (hwts_dynfield_offset < 0) {
 			printf("ERROR: Failed to register timestamp field\n");
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index a19934dbe0c8..0e5e3b5a9815 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -95,7 +95,7 @@ init_port(uint16_t port_num)
 	/* for port configuration all features are off by default */
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 	};
 	const uint16_t rx_rings = 1, tx_rings = num_nodes;
@@ -114,9 +114,9 @@ init_port(uint16_t port_num)
 	if (retval != 0)
 		return retval;
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/*
 	 * Standard DPDK port initialisation - config port, then set up
@@ -276,7 +276,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index fd7207aee758..16435ee3ccc2 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -49,9 +49,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
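
The capability-check-then-enable pattern touched in nearly every example above, written out once against the new names. A sketch only, not part of the patch; enable_fast_free_if_supported is a hypothetical helper:

#include <rte_ethdev.h>

static int
enable_fast_free_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	/* Enable the Tx offload only when the device advertises it. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	return 0;
}
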
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 97218917067e..44376417f83d 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -110,23 +110,23 @@ static int nb_sockets;
 /* empty vmdq configuration structure. Filled in programmatically */
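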
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 		/*
 		 * VLAN strip is necessary for a 1G NIC such as I350;
 		 * this fixes a bug where IPv4 forwarding in the guest can't
 		 * forward packets from one virtio dev to another virtio dev.
 		 */
-		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
+		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
-		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			     DEV_TX_OFFLOAD_TCP_CKSUM |
-			     DEV_TX_OFFLOAD_VLAN_INSERT |
-			     DEV_TX_OFFLOAD_MULTI_SEGS |
-			     DEV_TX_OFFLOAD_TCP_TSO),
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
+		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			     RTE_ETH_TX_OFFLOAD_TCP_TSO),
 	},
 	.rx_adv_conf = {
 		/*
@@ -134,7 +134,7 @@ static struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -291,9 +291,9 @@ port_init(uint16_t port)
 		return -1;
 
 	rx_rings = (uint16_t)dev_info.max_rx_queues;
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0) {
@@ -557,8 +557,8 @@ us_vhost_parse_args(int argc, char **argv)
 		case 'P':
 			promiscuous = 1;
 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
-				ETH_VMDQ_ACCEPT_BROADCAST |
-				ETH_VMDQ_ACCEPT_MULTICAST;
+				RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+				RTE_ETH_VMDQ_ACCEPT_MULTICAST;
 			break;
 
 		case OPT_VM2VM_NUM:
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index e19d79a40802..b159291d77ce 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -73,9 +73,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -270,7 +270,7 @@ check_all_ports_link_status(uint32_t port_mask)
 				continue;
 			}
 		       /* clear all_ports_up flag if any link down */
-			if (link.link_status == ETH_LINK_DOWN) {
+			if (link.link_status == RTE_ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 85996bf864b7..feee642f594d 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -65,12 +65,12 @@ static uint8_t rss_enable;
 /* empty vmdq configuration structure. Filled in programmatically */
 static const struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
 	},
 
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_NONE,
+		.mq_mode = RTE_ETH_MQ_TX_NONE,
 	},
 	.rx_adv_conf = {
 		/*
@@ -78,7 +78,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
 		 * appropriate values
 		 */
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_8_POOLS,
+			.nb_queue_pools = RTE_ETH_8_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -156,11 +156,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -258,9 +258,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
 	if (retval != 0)
 		return retval;
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index be0179fdeaf0..d2218f2cf741 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -59,8 +59,8 @@ static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports;
 
 /* number of pools (if user does not specify any, 32 by default) */
-static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
-static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
+static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
+static enum rte_eth_nb_tcs   num_tcs   = RTE_ETH_4_TCS;
 static uint16_t num_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
 static uint8_t rss_enable;
@@ -68,11 +68,11 @@ static uint8_t rss_enable;
 /* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
 static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	.rxmode = {
-		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
+		.mq_mode        = RTE_ETH_MQ_RX_VMDQ_DCB,
 		.split_hdr_size = 0,
 	},
 	.txmode = {
-		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
+		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
 	},
 	/*
 	 * should be overridden separately in code with
@@ -80,7 +80,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	 */
 	.rx_adv_conf = {
 		.vmdq_dcb_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -88,12 +88,12 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 			.dcb_tc = {0},
 		},
 		.dcb_rx_conf = {
-				.nb_tcs = ETH_4_TCS,
+				.nb_tcs = RTE_ETH_4_TCS,
 				/** Traffic class each UP mapped to. */
 				.dcb_tc = {0},
 		},
 		.vmdq_rx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.enable_default_pool = 0,
 			.default_pool = 0,
 			.nb_pool_maps = 0,
@@ -102,7 +102,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 	},
 	.tx_adv_conf = {
 		.vmdq_dcb_tx_conf = {
-			.nb_queue_pools = ETH_32_POOLS,
+			.nb_queue_pools = RTE_ETH_32_POOLS,
 			.dcb_tc = {0},
 		},
 	},
@@ -156,7 +156,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 		conf.pool_map[i].pools = 1UL << i;
 		vmdq_conf.pool_map[i].pools = 1UL << i;
 	}
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		conf.dcb_tc[i] = i % num_tcs;
 		dcb_conf.dcb_tc[i] = i % num_tcs;
 		tx_conf.dcb_tc[i] = i % num_tcs;
@@ -172,11 +172,11 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
 	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
 			  sizeof(tx_conf)));
 	if (rss_enable) {
-		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
-							ETH_RSS_UDP |
-							ETH_RSS_TCP |
-							ETH_RSS_SCTP;
+		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+							RTE_ETH_RSS_UDP |
+							RTE_ETH_RSS_TCP |
+							RTE_ETH_RSS_SCTP;
 	}
 	return 0;
 }
@@ -270,9 +270,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 	}
 
-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 		port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
 	port_conf.rx_adv_conf.rss_conf.rss_hf &=
@@ -381,9 +381,9 @@ vmdq_parse_num_pools(const char *q_arg)
 	if (n != 16 && n != 32)
 		return -1;
 	if (n == 16)
-		num_pools = ETH_16_POOLS;
+		num_pools = RTE_ETH_16_POOLS;
 	else
-		num_pools = ETH_32_POOLS;
+		num_pools = RTE_ETH_32_POOLS;
 
 	return 0;
 }
@@ -403,9 +403,9 @@ vmdq_parse_num_tcs(const char *q_arg)
 	if (n != 4 && n != 8)
 		return -1;
 	if (n == 4)
-		num_tcs = ETH_4_TCS;
+		num_tcs = RTE_ETH_4_TCS;
 	else
-		num_tcs = ETH_8_TCS;
+		num_tcs = RTE_ETH_8_TCS;
 
 	return 0;
 }
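
The get_eth_conf() hunk above maps the 8 user priorities round-robin onto the configured traffic classes, now counted by RTE_ETH_DCB_NUM_USER_PRIORITIES. A sketch of that mapping, not part of the patch, with a hypothetical helper name; with num_tcs == RTE_ETH_4_TCS it yields dcb_tc = {0,1,2,3,0,1,2,3}:

#include <rte_ethdev.h>

static void
map_priorities_to_tcs(struct rte_eth_dcb_rx_conf *dcb_conf,
		      enum rte_eth_nb_tcs num_tcs)
{
	int i;

	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
		dcb_conf->dcb_tc[i] = i % num_tcs;
}
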
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index b530ac6e320a..dcbffd4265fa 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -114,7 +114,7 @@ struct rte_eth_dev_data {
 	/** Device Ethernet link address. @see rte_eth_dev_release_port() */
 	struct rte_ether_addr *mac_addrs;
 	/** Bitmap associating MAC addresses to pools */
-	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+	uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
 	/**
 	 * Device Ethernet MAC addresses of hash filtering.
 	 * @see rte_eth_dev_release_port()
@@ -1700,23 +1700,23 @@ struct rte_eth_syn_filter {
 /**
  * filter type of tunneling packet
  */
-#define ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
-#define ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP Addr */
-#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
-#define ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
-#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
-#define ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
-
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN)
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_IVLAN | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
-					ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
-					ETH_TUNNEL_FILTER_TENID | \
-					ETH_TUNNEL_FILTER_IMAC)
+#define RTE_ETH_TUNNEL_FILTER_OMAC  0x01 /**< filter by outer MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_OIP   0x02 /**< filter by outer IP Addr */
+#define RTE_ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
+#define RTE_ETH_TUNNEL_FILTER_IMAC  0x08 /**< filter by inner MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
+#define RTE_ETH_TUNNEL_FILTER_IIP   0x20 /**< filter by inner IP addr */
+
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_IVLAN)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+						RTE_ETH_TUNNEL_FILTER_IVLAN | \
+						RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+					  RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC (RTE_ETH_TUNNEL_FILTER_OMAC | \
+					       RTE_ETH_TUNNEL_FILTER_TENID | \
+					       RTE_ETH_TUNNEL_FILTER_IMAC)
 
 /**
  *  Select IPv4 or IPv6 for tunnel filters.
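
Note the composite tunnel-filter macros are plain bitwise ORs of the single-field flags, so either spelling is equivalent. A sketch only (ethdev_driver.h is a driver-internal header, so this applies to driver code, not applications; the function name is hypothetical):

#include <assert.h>
#include <stdint.h>
#include <ethdev_driver.h> /* driver-internal header */

static void
tunnel_filter_flags_demo(void)
{
	uint32_t f = RTE_ETH_TUNNEL_FILTER_IMAC |  /* 0x08 */
		     RTE_ETH_TUNNEL_FILTER_IVLAN | /* 0x10 */
		     RTE_ETH_TUNNEL_FILTER_TENID;  /* 0x04 */

	assert(f == RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID); /* == 0x1c */
}
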
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 4ea5a657e003..9b6007803dd8 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -101,9 +101,6 @@ static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
 
 static const struct {
@@ -128,14 +125,14 @@ static const struct {
 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
-	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
 };
 
 #undef RTE_RX_OFFLOAD_BIT2STR
 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
 
 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
-	{ DEV_TX_OFFLOAD_##_name, #_name }
+	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
 
 static const struct {
 	uint64_t offload;
@@ -1182,32 +1179,32 @@ uint32_t
 rte_eth_speed_bitflag(uint32_t speed, int duplex)
 {
 	switch (speed) {
-	case ETH_SPEED_NUM_10M:
-		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
-	case ETH_SPEED_NUM_100M:
-		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
-	case ETH_SPEED_NUM_1G:
-		return ETH_LINK_SPEED_1G;
-	case ETH_SPEED_NUM_2_5G:
-		return ETH_LINK_SPEED_2_5G;
-	case ETH_SPEED_NUM_5G:
-		return ETH_LINK_SPEED_5G;
-	case ETH_SPEED_NUM_10G:
-		return ETH_LINK_SPEED_10G;
-	case ETH_SPEED_NUM_20G:
-		return ETH_LINK_SPEED_20G;
-	case ETH_SPEED_NUM_25G:
-		return ETH_LINK_SPEED_25G;
-	case ETH_SPEED_NUM_40G:
-		return ETH_LINK_SPEED_40G;
-	case ETH_SPEED_NUM_50G:
-		return ETH_LINK_SPEED_50G;
-	case ETH_SPEED_NUM_56G:
-		return ETH_LINK_SPEED_56G;
-	case ETH_SPEED_NUM_100G:
-		return ETH_LINK_SPEED_100G;
-	case ETH_SPEED_NUM_200G:
-		return ETH_LINK_SPEED_200G;
+	case RTE_ETH_SPEED_NUM_10M:
+		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+	case RTE_ETH_SPEED_NUM_100M:
+		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+	case RTE_ETH_SPEED_NUM_1G:
+		return RTE_ETH_LINK_SPEED_1G;
+	case RTE_ETH_SPEED_NUM_2_5G:
+		return RTE_ETH_LINK_SPEED_2_5G;
+	case RTE_ETH_SPEED_NUM_5G:
+		return RTE_ETH_LINK_SPEED_5G;
+	case RTE_ETH_SPEED_NUM_10G:
+		return RTE_ETH_LINK_SPEED_10G;
+	case RTE_ETH_SPEED_NUM_20G:
+		return RTE_ETH_LINK_SPEED_20G;
+	case RTE_ETH_SPEED_NUM_25G:
+		return RTE_ETH_LINK_SPEED_25G;
+	case RTE_ETH_SPEED_NUM_40G:
+		return RTE_ETH_LINK_SPEED_40G;
+	case RTE_ETH_SPEED_NUM_50G:
+		return RTE_ETH_LINK_SPEED_50G;
+	case RTE_ETH_SPEED_NUM_56G:
+		return RTE_ETH_LINK_SPEED_56G;
+	case RTE_ETH_SPEED_NUM_100G:
+		return RTE_ETH_LINK_SPEED_100G;
+	case RTE_ETH_SPEED_NUM_200G:
+		return RTE_ETH_LINK_SPEED_200G;
 	default:
 		return 0;
 	}
@@ -1528,7 +1525,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If LRO is enabled, check that the maximum aggregated packet
 	 * size is supported by the configured device.
 	 */
-	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t max_rx_pktlen;
 		uint32_t overhead_len;
 
@@ -1585,12 +1582,12 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
-	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
-	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
 		RTE_ETHDEV_LOG(ERR,
 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
 			port_id,
-			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -2213,7 +2210,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * size is supported by the configured device.
 	 */
 	/* Get the real Ethernet overhead length */
-	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
 		uint32_t overhead_len;
 		uint32_t max_rx_pktlen;
 		int ret;
@@ -2793,21 +2790,21 @@ const char *
 rte_eth_link_speed_to_str(uint32_t link_speed)
 {
 	switch (link_speed) {
-	case ETH_SPEED_NUM_NONE: return "None";
-	case ETH_SPEED_NUM_10M:  return "10 Mbps";
-	case ETH_SPEED_NUM_100M: return "100 Mbps";
-	case ETH_SPEED_NUM_1G:   return "1 Gbps";
-	case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
-	case ETH_SPEED_NUM_5G:   return "5 Gbps";
-	case ETH_SPEED_NUM_10G:  return "10 Gbps";
-	case ETH_SPEED_NUM_20G:  return "20 Gbps";
-	case ETH_SPEED_NUM_25G:  return "25 Gbps";
-	case ETH_SPEED_NUM_40G:  return "40 Gbps";
-	case ETH_SPEED_NUM_50G:  return "50 Gbps";
-	case ETH_SPEED_NUM_56G:  return "56 Gbps";
-	case ETH_SPEED_NUM_100G: return "100 Gbps";
-	case ETH_SPEED_NUM_200G: return "200 Gbps";
-	case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+	case RTE_ETH_SPEED_NUM_NONE: return "None";
+	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
+	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
+	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
+	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
+	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
+	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
+	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
+	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
+	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
+	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
 	default: return "Invalid";
 	}
 }
@@ -2831,14 +2828,14 @@ rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
 		return -EINVAL;
 	}
 
-	if (eth_link->link_status == ETH_LINK_DOWN)
+	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
 		return snprintf(str, len, "Link down");
 	else
 		return snprintf(str, len, "Link up at %s %s %s",
 			rte_eth_link_speed_to_str(eth_link->link_speed),
-			(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			"FDX" : "HDX",
-			(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
 			"Autoneg" : "Fixed");
 }
 
@@ -3745,7 +3742,7 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
 	dev = &rte_eth_devices[port_id];
 
 	if (!(dev->data->dev_conf.rxmode.offloads &
-	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
+	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
 			port_id);
 		return -ENOSYS;
@@ -3832,44 +3829,44 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	dev_offloads = orig_offloads;
 
 	/* check which option changed by application */
-	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
-		mask |= ETH_VLAN_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		mask |= RTE_ETH_VLAN_STRIP_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
-		mask |= ETH_VLAN_FILTER_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+		mask |= RTE_ETH_VLAN_FILTER_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
-		mask |= ETH_VLAN_EXTEND_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+		mask |= RTE_ETH_VLAN_EXTEND_MASK;
 	}
 
-	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
-	org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 		else
-			dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
-		mask |= ETH_QINQ_STRIP_MASK;
+			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+		mask |= RTE_ETH_QINQ_STRIP_MASK;
 	}
 
 	/*no change*/
@@ -3914,17 +3911,17 @@ rte_eth_dev_get_vlan_offload(uint16_t port_id)
 	dev = &rte_eth_devices[port_id];
 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-		ret |= ETH_VLAN_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		ret |= ETH_VLAN_FILTER_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-		ret |= ETH_VLAN_EXTEND_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
 
-	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
-		ret |= ETH_QINQ_STRIP_OFFLOAD;
+	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
 
 	return ret;
 }
@@ -4001,7 +3998,7 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
 		return -EINVAL;
 	}
@@ -4019,7 +4016,7 @@ eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
 {
 	uint16_t i, num;
 
-	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
+	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
 	for (i = 0; i < num; i++) {
 		if (reta_conf[i].mask)
 			return 0;
@@ -4041,8 +4038,8 @@ eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
 			(reta_conf[idx].reta[shift] >= max_rxq)) {
 			RTE_ETHDEV_LOG(ERR,
@@ -4198,7 +4195,7 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4224,7 +4221,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
 		return -EINVAL;
 	}
 
-	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
 		return -EINVAL;
 	}
@@ -4365,8 +4362,8 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 			port_id);
 		return -EINVAL;
 	}
-	if (pool >= ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", ETH_64_POOLS - 1);
+	if (pool >= RTE_ETH_64_POOLS) {
+		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
@@ -6275,7 +6272,7 @@ eth_dev_handle_port_link_status(const char *cmd __rte_unused,
 	rte_tel_data_add_dict_string(d, status_str, "UP");
 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
 	rte_tel_data_add_dict_string(d, "duplex",
-			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 				"full-duplex" : "half-duplex");
 	return 0;
 }
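
For completeness, the rte_eth_link_to_str()/rte_eth_link_speed_to_str() conversions touched above are used like this. A minimal sketch, not part of the patch; print_link is a hypothetical name:

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char buf[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) < 0)
		return;
	/* e.g. "Link up at 10 Gbps FDX Autoneg" or "Link down" */
	rte_eth_link_to_str(buf, sizeof(buf), &link);
	printf("Port %u: %s\n", port_id, buf);
}
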
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index fa4a68532db1..ff608afa960e 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -250,7 +250,7 @@ void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
  * field is not supported, its value is 0.
  * All byte-related statistics do not include Ethernet FCS regardless
  * of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
  */
 struct rte_eth_stats {
 	uint64_t ipackets;  /**< Total number of successfully received packets. */
@@ -281,43 +281,75 @@ struct rte_eth_stats {
 /**@{@name Link speed capabilities
  * Device supported speeds bitmap flags
  */
-#define ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
-#define ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
-#define ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
-#define ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
-#define ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
-#define ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
-#define ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
-#define ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
-#define ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG     RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED       RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD      RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M         RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD     RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M        RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
+#define ETH_LINK_SPEED_1G          RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G        RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
+#define ETH_LINK_SPEED_5G          RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
+#define ETH_LINK_SPEED_10G         RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
+#define ETH_LINK_SPEED_20G         RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
+#define ETH_LINK_SPEED_25G         RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
+#define ETH_LINK_SPEED_40G         RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
+#define ETH_LINK_SPEED_50G         RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
+#define ETH_LINK_SPEED_56G         RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G        RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G        RTE_ETH_LINK_SPEED_200G
 /**@}*/
 
 /**@{@name Link speed
  * Ethernet numeric link speeds in Mbps
  */
-#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
-#define ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
-#define ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
-#define ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
-#define ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
-#define ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
-#define ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
-#define ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
-#define ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
-#define ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
-#define ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE        RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
+#define ETH_SPEED_NUM_10M         RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M        RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
+#define ETH_SPEED_NUM_1G          RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G        RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
+#define ETH_SPEED_NUM_5G          RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
+#define ETH_SPEED_NUM_10G         RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
+#define ETH_SPEED_NUM_20G         RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
+#define ETH_SPEED_NUM_25G         RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
+#define ETH_SPEED_NUM_40G         RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
+#define ETH_SPEED_NUM_50G         RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
+#define ETH_SPEED_NUM_56G         RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G        RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G        RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN     RTE_ETH_SPEED_NUM_UNKNOWN
 /**@}*/
 
 /**
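
The backward-compatibility scheme above means existing applications keep compiling unchanged until the un-prefixed macros are removed in a later LTS. A sketch, not part of the patch, showing the equivalence:

#include <rte_ethdev.h>

/* Both expand to RTE_BIT32(8) | RTE_BIT32(10): the old names are now
 * aliases for the RTE_ETH_* names, not independent definitions. */
static const uint32_t speeds_old = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G;
static const uint32_t speeds_new = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G;
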
@@ -325,21 +357,27 @@ struct rte_eth_stats {
  */
 __extension__
 struct rte_eth_link {
-	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
-	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
-	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
+	uint32_t link_speed;        /**< RTE_ETH_SPEED_NUM_ */
+	uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+	uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
+	uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
 
 /**@{@name Link negotiation
  * Constants used in link management.
  */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX     RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX     RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN            RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP              RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED           RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG         RTE_ETH_LINK_AUTONEG
 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
 /**@}*/
 
@@ -356,9 +394,12 @@ struct rte_eth_thresh {
 /**@{@name Multi-queue mode
  * @see rte_eth_conf.rxmode.mq_mode.
  */
-#define ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
-#define ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
-#define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define RTE_ETH_MQ_RX_RSS_FLAG  0x1 /**< Enable RSS. @see rte_eth_rss_conf */
+#define ETH_MQ_RX_RSS_FLAG      RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG  0x2 /**< Enable DCB. */
+#define ETH_MQ_RX_DCB_FLAG      RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define ETH_MQ_RX_VMDQ_FLAG     RTE_ETH_MQ_RX_VMDQ_FLAG
 /**@}*/
 
 /**
@@ -367,50 +408,49 @@ struct rte_eth_thresh {
  */
 enum rte_eth_rx_mq_mode {
 	/** None of DCB, RSS or VMDq mode */
-	ETH_MQ_RX_NONE = 0,
+	RTE_ETH_MQ_RX_NONE = 0,
 
 	/** For Rx side, only RSS is on */
-	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
 	/** For Rx side, only DCB is on. */
-	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Both DCB and RSS enable */
-	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 
 	/** Only VMDq, no RSS nor DCB */
-	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** RSS mode with VMDq */
-	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
 	/** Use VMDq+DCB to route traffic to queues */
-	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
 	/** Enable both VMDq and DCB in VMDq */
-	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
-				 ETH_MQ_RX_VMDQ_FLAG,
+	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+				 RTE_ETH_MQ_RX_VMDQ_FLAG,
 };
 
-/**
- * for Rx mq mode backward compatible
- */
-#define ETH_RSS                       ETH_MQ_RX_RSS
-#define VMDQ_DCB                      ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX                    ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE		RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS		RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB		RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS	RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY	RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS	RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB	RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS	RTE_ETH_MQ_RX_VMDQ_DCB_RSS
 
 /**
  * A set of values to identify what method is to be used to transmit
  * packets using multi-TCs.
  */
 enum rte_eth_tx_mq_mode {
-	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
-	ETH_MQ_TX_DCB,          /**< For Tx side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For Tx side,both DCB and VT is on. */
-	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
+	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
+	RTE_ETH_MQ_TX_DCB,          /**< For Tx side, only DCB is on. */
+	RTE_ETH_MQ_TX_VMDQ_DCB,     /**< For Tx side, both DCB and VT are on. */
+	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
-
-/**
- * for Tx mq mode backward compatible
- */
-#define ETH_DCB_NONE                ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX             ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX                  ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE		RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB		RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB	RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY	RTE_ETH_MQ_TX_VMDQ_ONLY
 
 /**
  * A structure used to configure the Rx features of an Ethernet port.
@@ -423,7 +463,7 @@ struct rte_eth_rxmode {
 	uint32_t max_lro_pkt_size;
 	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
 	/**
-	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -438,12 +478,17 @@ struct rte_eth_rxmode {
  * Note that single VLAN is treated the same as inner VLAN.
  */
 enum rte_vlan_type {
-	ETH_VLAN_TYPE_UNKNOWN = 0,
-	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
-	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
-	ETH_VLAN_TYPE_MAX,
+	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+	RTE_ETH_VLAN_TYPE_MAX,
 };
 
+#define ETH_VLAN_TYPE_UNKNOWN	RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER	RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER	RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX	RTE_ETH_VLAN_TYPE_MAX
+
 /**
  * A structure used to describe a VLAN filter.
  * If the bit corresponding to a VID is set, such VID is on.
@@ -514,38 +559,70 @@ struct rte_eth_rss_conf {
  * Below macros are defined for RSS offload types, they can be used to
  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
  */
-#define ETH_RSS_IPV4               RTE_BIT64(2)
-#define ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
-#define ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
-#define ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
-#define ETH_RSS_IPV6               RTE_BIT64(8)
-#define ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
-#define ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
-#define ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
-#define ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
-#define ETH_RSS_IPV6_EX            RTE_BIT64(15)
-#define ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
-#define ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
-#define ETH_RSS_PORT               RTE_BIT64(18)
-#define ETH_RSS_VXLAN              RTE_BIT64(19)
-#define ETH_RSS_GENEVE             RTE_BIT64(20)
-#define ETH_RSS_NVGRE              RTE_BIT64(21)
-#define ETH_RSS_GTPU               RTE_BIT64(23)
-#define ETH_RSS_ETH                RTE_BIT64(24)
-#define ETH_RSS_S_VLAN             RTE_BIT64(25)
-#define ETH_RSS_C_VLAN             RTE_BIT64(26)
-#define ETH_RSS_ESP                RTE_BIT64(27)
-#define ETH_RSS_AH                 RTE_BIT64(28)
-#define ETH_RSS_L2TPV3             RTE_BIT64(29)
-#define ETH_RSS_PFCP               RTE_BIT64(30)
-#define ETH_RSS_PPPOE              RTE_BIT64(31)
-#define ETH_RSS_ECPRI              RTE_BIT64(32)
-#define ETH_RSS_MPLS               RTE_BIT64(33)
-#define ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
+#define RTE_ETH_RSS_IPV4               RTE_BIT64(2)
+#define ETH_RSS_IPV4                   RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
+#define ETH_RSS_FRAG_IPV4              RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
+#define ETH_RSS_NONFRAG_IPV4_TCP       RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
+#define ETH_RSS_NONFRAG_IPV4_UDP       RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP      RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER     RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6               RTE_BIT64(8)
+#define ETH_RSS_IPV6                   RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
+#define ETH_RSS_FRAG_IPV6              RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
+#define ETH_RSS_NONFRAG_IPV6_TCP       RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
+#define ETH_RSS_NONFRAG_IPV6_UDP       RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP      RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER     RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
+#define ETH_RSS_L2_PAYLOAD             RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX            RTE_BIT64(15)
+#define ETH_RSS_IPV6_EX                RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
+#define ETH_RSS_IPV6_TCP_EX            RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
+#define ETH_RSS_IPV6_UDP_EX            RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT               RTE_BIT64(18)
+#define ETH_RSS_PORT                   RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN              RTE_BIT64(19)
+#define ETH_RSS_VXLAN                  RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE             RTE_BIT64(20)
+#define ETH_RSS_GENEVE                 RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE              RTE_BIT64(21)
+#define ETH_RSS_NVGRE                  RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU               RTE_BIT64(23)
+#define ETH_RSS_GTPU                   RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH                RTE_BIT64(24)
+#define ETH_RSS_ETH                    RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN             RTE_BIT64(25)
+#define ETH_RSS_S_VLAN                 RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN             RTE_BIT64(26)
+#define ETH_RSS_C_VLAN                 RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP                RTE_BIT64(27)
+#define ETH_RSS_ESP                    RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH                 RTE_BIT64(28)
+#define ETH_RSS_AH                     RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3             RTE_BIT64(29)
+#define ETH_RSS_L2TPV3                 RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP               RTE_BIT64(30)
+#define ETH_RSS_PFCP                   RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE              RTE_BIT64(31)
+#define ETH_RSS_PPPOE                  RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI              RTE_BIT64(32)
+#define ETH_RSS_ECPRI                  RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS               RTE_BIT64(33)
+#define ETH_RSS_MPLS                   RTE_ETH_RSS_MPLS
+#define RTE_ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
+#define ETH_RSS_IPV4_CHKSUM            RTE_ETH_RSS_IPV4_CHKSUM
 
 /**
  * The ETH_RSS_L4_CHKSUM works on checksum field of any L4 header.
@@ -554,41 +631,48 @@ struct rte_eth_rss_conf {
  * checksum type for constructing the use of RSS offload bits.
  *
  * Due to above reason, some old APIs (and configuration) don't support
- * ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
+ * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
  *
  * For the case that checksum is not used in an UDP header,
  * it takes the reserved value 0 as input for the hash function.
  */
-#define ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
+#define RTE_ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
+#define ETH_RSS_L4_CHKSUM              RTE_ETH_RSS_L4_CHKSUM
 
 /*
- * We use the following macros to combine with above ETH_RSS_* for
+ * We use the following macros to combine with above RTE_ETH_RSS_* for
  * more specific input set selection. These bits are defined starting
  * from the high end of the 64 bits.
- * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
+ * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
  * the same level are used simultaneously, it is the same case as none of
  * them are added.
  */
-#define ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
-#define ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
-#define ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
-#define ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
-#define ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
-#define ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
+#define RTE_ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
+#define ETH_RSS_L3_SRC_ONLY            RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
+#define ETH_RSS_L3_DST_ONLY            RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
+#define ETH_RSS_L4_SRC_ONLY            RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
+#define ETH_RSS_L4_DST_ONLY            RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
+#define ETH_RSS_L2_SRC_ONLY            RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
+#define ETH_RSS_L2_DST_ONLY            RTE_ETH_RSS_L2_DST_ONLY
 
 /*
  * Only select IPV6 address prefix as RSS input set according to
- * https://tools.ietf.org/html/rfc6052
- * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
- * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
+ * https://tools.ietf.org/html/rfc6052
+ * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
  */
-#define RTE_ETH_RSS_L3_PRE32	   RTE_BIT64(57)
-#define RTE_ETH_RSS_L3_PRE40	   RTE_BIT64(56)
-#define RTE_ETH_RSS_L3_PRE48	   RTE_BIT64(55)
-#define RTE_ETH_RSS_L3_PRE56	   RTE_BIT64(54)
-#define RTE_ETH_RSS_L3_PRE64	   RTE_BIT64(53)
-#define RTE_ETH_RSS_L3_PRE96	   RTE_BIT64(52)
+#define RTE_ETH_RSS_L3_PRE32           RTE_BIT64(57)
+#define RTE_ETH_RSS_L3_PRE40           RTE_BIT64(56)
+#define RTE_ETH_RSS_L3_PRE48           RTE_BIT64(55)
+#define RTE_ETH_RSS_L3_PRE56           RTE_BIT64(54)
+#define RTE_ETH_RSS_L3_PRE64           RTE_BIT64(53)
+#define RTE_ETH_RSS_L3_PRE96           RTE_BIT64(52)
 
 /*
  * Use the following macros to combine with the above layers
@@ -603,22 +687,27 @@ struct rte_eth_rss_conf {
  * It basically stands for the innermost encapsulation level RSS
  * can be performed on according to PMD and device capabilities.
  */
-#define ETH_RSS_LEVEL_PMD_DEFAULT       (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT  (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT      RTE_ETH_RSS_LEVEL_PMD_DEFAULT
 
 /**
  * level 1, requests RSS to be performed on the outermost packet
  * encapsulation level.
  */
-#define ETH_RSS_LEVEL_OUTERMOST         (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST    (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST        RTE_ETH_RSS_LEVEL_OUTERMOST
 
 /**
  * level 2, requests RSS to be performed on the specified inner packet
  * encapsulation level, from outermost to innermost (lower to higher values).
  */
-#define ETH_RSS_LEVEL_INNERMOST         (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK              (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST    (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST        RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK         (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK             RTE_ETH_RSS_LEVEL_MASK
 
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf)          RTE_ETH_RSS_LEVEL(rss_hf)
 
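A minimal sketch of how the level macros compose with the hash type flags defined above:

	/* Request RSS on the innermost encapsulation for IPv4/UDP. */
	uint64_t rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			  RTE_ETH_RSS_LEVEL_INNERMOST;
	uint64_t level = RTE_ETH_RSS_LEVEL(rss_hf);	/* == 2, innermost */
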
 /**
  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
@@ -633,217 +722,275 @@ struct rte_eth_rss_conf {
 static inline uint64_t
 rte_eth_rss_hf_refine(uint64_t rss_hf)
 {
-	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
 
-	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
-		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
+		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
 
 	return rss_hf;
 }
 
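Usage sketch for the helper above: when the SRC_ONLY and DST_ONLY bits of the same layer are both set, they cancel out and hashing falls back to both source and destination:

	uint64_t hf = RTE_ETH_RSS_IPV4 |
		      RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY;

	hf = rte_eth_rss_hf_refine(hf);
	/* hf == RTE_ETH_RSS_IPV4: both L3 *_ONLY bits were cleared. */
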
-#define ETH_RSS_IPV6_PRE32 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32	RTE_ETH_RSS_IPV6_PRE32
 
-#define ETH_RSS_IPV6_PRE40 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40	RTE_ETH_RSS_IPV6_PRE40
 
-#define ETH_RSS_IPV6_PRE48 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48	RTE_ETH_RSS_IPV6_PRE48
 
-#define ETH_RSS_IPV6_PRE56 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56	RTE_ETH_RSS_IPV6_PRE56
 
-#define ETH_RSS_IPV6_PRE64 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64	RTE_ETH_RSS_IPV6_PRE64
 
-#define ETH_RSS_IPV6_PRE96 ( \
-		ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+		RTE_ETH_RSS_IPV6 | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96	RTE_ETH_RSS_IPV6_PRE96
 
-#define ETH_RSS_IPV6_PRE32_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP	RTE_ETH_RSS_IPV6_PRE32_UDP
 
-#define ETH_RSS_IPV6_PRE40_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP	RTE_ETH_RSS_IPV6_PRE40_UDP
 
-#define ETH_RSS_IPV6_PRE48_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP	RTE_ETH_RSS_IPV6_PRE48_UDP
 
-#define ETH_RSS_IPV6_PRE56_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP	RTE_ETH_RSS_IPV6_PRE56_UDP
 
-#define ETH_RSS_IPV6_PRE64_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP	RTE_ETH_RSS_IPV6_PRE64_UDP
 
-#define ETH_RSS_IPV6_PRE96_UDP ( \
-		ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP	RTE_ETH_RSS_IPV6_PRE96_UDP
 
-#define ETH_RSS_IPV6_PRE32_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP	RTE_ETH_RSS_IPV6_PRE32_TCP
 
-#define ETH_RSS_IPV6_PRE40_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP	RTE_ETH_RSS_IPV6_PRE40_TCP
 
-#define ETH_RSS_IPV6_PRE48_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP	RTE_ETH_RSS_IPV6_PRE48_TCP
 
-#define ETH_RSS_IPV6_PRE56_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP	RTE_ETH_RSS_IPV6_PRE56_TCP
 
-#define ETH_RSS_IPV6_PRE64_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP	RTE_ETH_RSS_IPV6_PRE64_TCP
 
-#define ETH_RSS_IPV6_PRE96_TCP ( \
-		ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
 		RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP	RTE_ETH_RSS_IPV6_PRE96_TCP
 
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP	RTE_ETH_RSS_IPV6_PRE32_SCTP
 
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP	RTE_ETH_RSS_IPV6_PRE40_SCTP
 
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP	RTE_ETH_RSS_IPV6_PRE48_SCTP
 
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP	RTE_ETH_RSS_IPV6_PRE56_SCTP
 
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP	RTE_ETH_RSS_IPV6_PRE64_SCTP
 
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
-		ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
 		RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
-	ETH_RSS_VXLAN  | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
-	ETH_RSS_S_VLAN  | \
-	ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP	RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP	RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP	RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP	RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP	RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+	RTE_ETH_RSS_VXLAN  | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL	RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+	RTE_ETH_RSS_S_VLAN  | \
+	RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN	RTE_ETH_RSS_VLAN
 
 /** Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX | \
-	ETH_RSS_PORT  | \
-	ETH_RSS_VXLAN | \
-	ETH_RSS_GENEVE | \
-	ETH_RSS_NVGRE | \
-	ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX | \
+	RTE_ETH_RSS_PORT  | \
+	RTE_ETH_RSS_VXLAN | \
+	RTE_ETH_RSS_GENEVE | \
+	RTE_ETH_RSS_NVGRE | \
+	RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK	RTE_ETH_RSS_PROTO_MASK
 
 /*
  * Definitions used for redirection table entry size.
  * Some RSS RETA sizes may not be supported by some drivers, check the
  * documentation or the description of relevant functions for more details.
  */
-#define ETH_RSS_RETA_SIZE_64  64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE   64
+#define RTE_ETH_RSS_RETA_SIZE_64  64
+#define ETH_RSS_RETA_SIZE_64      RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128     RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256     RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512     RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE   64
+#define RTE_RETA_GROUP_SIZE       RTE_ETH_RETA_GROUP_SIZE
 
 /**@{@name VMDq and DCB maximums */
-#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDq VLAN filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
-#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDq VLAN filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS       RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES     RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES         RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES              RTE_ETH_DCB_NUM_QUEUES
 /**@}*/
 
 /**@{@name DCB capabilities */
-#define ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT      0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PG_SUPPORT          RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT     0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT         RTE_ETH_DCB_PFC_SUPPORT
 /**@}*/
 
 /**@{@name VLAN offload bits */
-#define ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
-
-#define ETH_VLAN_STRIP_MASK   0x0001 /**< VLAN Strip  setting mask */
-#define ETH_VLAN_FILTER_MASK  0x0002 /**< VLAN Filter  setting mask*/
-#define ETH_VLAN_EXTEND_MASK  0x0004 /**< VLAN Extend  setting mask*/
-#define ETH_QINQ_STRIP_MASK   0x0008 /**< QINQ Strip  setting mask */
-#define ETH_VLAN_ID_MAX       0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD       RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD      RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD      RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD       RTE_ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK      0x0001 /**< VLAN Strip setting mask */
+#define ETH_VLAN_STRIP_MASK          RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK     0x0002 /**< VLAN Filter setting mask */
+#define ETH_VLAN_FILTER_MASK         RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK     0x0004 /**< VLAN Extend setting mask */
+#define ETH_VLAN_EXTEND_MASK         RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK      0x0008 /**< QINQ Strip setting mask */
+#define ETH_QINQ_STRIP_MASK          RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX          0x0FFF /**< VLAN ID is in lower 12 bits */
+#define ETH_VLAN_ID_MAX              RTE_ETH_VLAN_ID_MAX
 /**@}*/
 
 /* Definitions used for receive MAC address   */
-#define ETH_NUM_RECEIVE_MAC_ADDR  128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128 /**< Maximum nb. of receive mac addr. */
+#define ETH_NUM_RECEIVE_MAC_ADDR       RTE_ETH_NUM_RECEIVE_MAC_ADDR
 
 /* Definitions used for unicast hash  */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY     RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
 
 /**@{@name VMDq Rx mode
  * @see rte_eth_vmdq_rx_conf.rx_mode
  */
-#define ETH_VMDQ_ACCEPT_UNTAG   0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST   0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST   0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG      0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG          RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC    0x0002 /**< accept packets in multicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_MC        RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC    0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC        RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST  0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST      RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST  0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST      RTE_ETH_VMDQ_ACCEPT_MULTICAST
 /**@}*/
 
 /**
@@ -856,7 +1003,7 @@ struct rte_eth_rss_reta_entry64 {
 	/** Mask bits indicate which entries need to be updated/queried. */
 	uint64_t mask;
 	/** Group of 64 redirection table entries. */
-	uint16_t reta[RTE_RETA_GROUP_SIZE];
+	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
 };
 
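As a sketch of how RTE_ETH_RETA_GROUP_SIZE is used when querying the table (the 128-entry RETA size is an assumption; real code should take it from dev_info.reta_size):

	struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_ETH_RETA_GROUP_SIZE];
	unsigned int i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_DIM(reta_conf); i++)
		reta_conf[i].mask = UINT64_MAX;	/* query every entry */
	rte_eth_dev_rss_reta_query(port_id, reta_conf, 128);
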
 /**
@@ -864,38 +1011,44 @@ struct rte_eth_rss_reta_entry64 {
  * in DCB configurations
  */
 enum rte_eth_nb_tcs {
-	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
-	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
+	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
 };
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
 
 /**
  * This enum indicates the possible number of queue pools
  * in VMDq configurations.
  */
 enum rte_eth_nb_pools {
-	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
-	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
-	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
-	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
+	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
+	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
+	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
+	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
 };
+#define ETH_8_POOLS	RTE_ETH_8_POOLS
+#define ETH_16_POOLS	RTE_ETH_16_POOLS
+#define ETH_32_POOLS	RTE_ETH_32_POOLS
+#define ETH_64_POOLS	RTE_ETH_64_POOLS
 
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
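A hypothetical Rx DCB setup with the renamed constants, mapping user priorities 0-7 round-robin onto 4 traffic classes:

	struct rte_eth_dcb_rx_conf dcb_rx = { .nb_tcs = RTE_ETH_4_TCS };
	int up;

	for (up = 0; up < RTE_ETH_DCB_NUM_USER_PRIORITIES; up++)
		dcb_rx.dcb_tc[up] = up % RTE_ETH_4_TCS;
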
 struct rte_eth_vmdq_dcb_tx_conf {
 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_dcb_tx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
 	/** Traffic class each UP mapped to. */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 struct rte_eth_vmdq_tx_conf {
@@ -921,9 +1074,9 @@ struct rte_eth_vmdq_dcb_conf {
 	struct {
 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
 	/** Selects a queue in a pool */
-	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
 };
 
 /**
@@ -933,7 +1086,7 @@ struct rte_eth_vmdq_dcb_conf {
  * Using this feature, packets are routed to a pool of queues. By default,
  * the pool selection is based on the MAC address, the VLAN ID in the
  * VLAN tag as specified in the pool_map array.
- * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
  * selection using only the MAC address. MAC address to pool mapping is done
  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
  * corresponding to the pool ID.
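
For example, a minimal sketch of a VMDq Rx configuration that accepts untagged and broadcast frames and otherwise relies on MAC-based pool selection (all field values are illustrative):

	struct rte_eth_vmdq_rx_conf vmdq_conf = {
		.nb_queue_pools = RTE_ETH_8_POOLS,
		.rx_mode = RTE_ETH_VMDQ_ACCEPT_UNTAG |
			   RTE_ETH_VMDQ_ACCEPT_BROADCAST,
	};
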
@@ -954,7 +1107,7 @@ struct rte_eth_vmdq_rx_conf {
 	struct {
 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
-	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
+	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
 };
 
 /**
@@ -963,7 +1116,7 @@ struct rte_eth_vmdq_rx_conf {
 struct rte_eth_txmode {
 	enum rte_eth_tx_mq_mode mq_mode; /**< Tx multi-queues mode. */
 	/**
-	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
 	 * structure are allowed to be set.
 	 */
@@ -1055,7 +1208,7 @@ struct rte_eth_rxconf {
 	uint16_t share_group;
 	uint16_t share_qid; /**< Shared Rx queue ID in group */
 	/**
-	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1084,7 +1237,7 @@ struct rte_eth_txconf {
 
 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
 	/**
-	 * Per-queue Tx offloads to be set  using DEV_TX_OFFLOAD_* flags.
+	 * Per-queue Tx offloads to be set  using RTE_ETH_TX_OFFLOAD_* flags.
 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
 	 * fields on rte_eth_dev_info structure are allowed to be set.
 	 */
@@ -1195,12 +1348,17 @@ struct rte_eth_desc_lim {
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
-	RTE_FC_NONE = 0, /**< Disable flow control. */
-	RTE_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
-	RTE_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
-	RTE_FC_FULL      /**< Enable flow control on both side. */
+	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+	RTE_ETH_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
+	RTE_ETH_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
+	RTE_ETH_FC_FULL      /**< Enable flow control on both sides. */
 };
 
+#define RTE_FC_NONE	RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE	RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE	RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL	RTE_ETH_FC_FULL
+
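Illustrative read-modify-write use of the renamed modes (port 0 and error handling are assumptions left out for brevity):

	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	rte_eth_dev_flow_ctrl_get(0, &fc_conf);	/* start from current settings */
	fc_conf.mode = RTE_ETH_FC_FULL;		/* pause frames on Rx and Tx */
	rte_eth_dev_flow_ctrl_set(0, &fc_conf);
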
 /**
  * A structure used to configure Ethernet flow control parameter.
  * These parameters will be configured into the register of the NIC.
@@ -1231,18 +1389,29 @@ struct rte_eth_pfc_conf {
  * @see rte_eth_udp_tunnel
  */
 enum rte_eth_tunnel_type {
-	RTE_TUNNEL_TYPE_NONE = 0,
-	RTE_TUNNEL_TYPE_VXLAN,
-	RTE_TUNNEL_TYPE_GENEVE,
-	RTE_TUNNEL_TYPE_TEREDO,
-	RTE_TUNNEL_TYPE_NVGRE,
-	RTE_TUNNEL_TYPE_IP_IN_GRE,
-	RTE_L2_TUNNEL_TYPE_E_TAG,
-	RTE_TUNNEL_TYPE_VXLAN_GPE,
-	RTE_TUNNEL_TYPE_ECPRI,
-	RTE_TUNNEL_TYPE_MAX,
+	RTE_ETH_TUNNEL_TYPE_NONE = 0,
+	RTE_ETH_TUNNEL_TYPE_VXLAN,
+	RTE_ETH_TUNNEL_TYPE_GENEVE,
+	RTE_ETH_TUNNEL_TYPE_TEREDO,
+	RTE_ETH_TUNNEL_TYPE_NVGRE,
+	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+	RTE_ETH_TUNNEL_TYPE_ECPRI,
+	RTE_ETH_TUNNEL_TYPE_MAX,
 };
 
+#define RTE_TUNNEL_TYPE_NONE		RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN		RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE		RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO		RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE		RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG	RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI		RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX		RTE_ETH_TUNNEL_TYPE_MAX
+
 /* Deprecated API file for rte_eth_dev_filter_* functions */
 #include "rte_eth_ctrl.h"
 
@@ -1250,11 +1419,16 @@ enum rte_eth_tunnel_type {
  *  Memory space that can be configured to store Flow Director filters
  *  in the board memory.
  */
-enum rte_fdir_pballoc_type {
-	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
-	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
-	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+	RTE_ETH_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+	RTE_ETH_FDIR_PBALLOC_128K,     /**< 128k. */
+	RTE_ETH_FDIR_PBALLOC_256K,     /**< 256k. */
 };
+#define rte_fdir_pballoc_type	rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K	RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K	RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K	RTE_ETH_FDIR_PBALLOC_256K
 
 /**
  *  Select report mode of FDIR hash information in Rx descriptors.
@@ -1271,9 +1445,9 @@ enum rte_fdir_status_mode {
  *
  * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
  */
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
 	enum rte_fdir_mode mode; /**< Flow Director mode. */
-	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+	enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
 	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
 	/** Rx queue of packets matching a "drop" filter in perfect mode. */
 	uint8_t drop_queue;
@@ -1282,6 +1456,8 @@ struct rte_fdir_conf {
 	struct rte_eth_fdir_flex_conf flex_conf;
 };
 
+#define rte_fdir_conf rte_eth_fdir_conf
+
 /**
  * UDP tunneling configuration.
  *
@@ -1299,7 +1475,7 @@ struct rte_eth_udp_tunnel {
 /**
  * A structure used to enable/disable specific device interrupts.
  */
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint32_t lsc:1;
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
@@ -1308,18 +1484,20 @@ struct rte_intr_conf {
 	uint32_t rmv:1;
 };
 
+#define rte_intr_conf rte_eth_intr_conf
+
 /**
  * A structure used to configure an Ethernet port.
  * Depending upon the Rx multi-queue mode, extra advanced
  * configuration settings may be needed.
  */
 struct rte_eth_conf {
-	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
-				used. ETH_LINK_SPEED_FIXED disables link
+	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
+				used. RTE_ETH_LINK_SPEED_FIXED disables link
 				autonegotiation, and a unique speed shall be
 				set. Otherwise, the bitmap defines the set of
 				speeds to be advertised. If the special value
-				ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
+				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
 				supported are advertised. */
 	struct rte_eth_rxmode rxmode; /**< Port Rx configuration. */
 	struct rte_eth_txmode txmode; /**< Port Tx configuration. */
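
A minimal sketch of the link_speeds field described above; RTE_ETH_LINK_SPEED_AUTONEG advertises every supported speed:

	struct rte_eth_conf port_conf = {
		.link_speeds = RTE_ETH_LINK_SPEED_AUTONEG,
	};
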
@@ -1346,47 +1524,67 @@ struct rte_eth_conf {
 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
 	} tx_adv_conf; /**< Port Tx DCB configuration (union). */
 	/** Currently,Priority Flow Control(PFC) are supported,if DCB with PFC
-	    is needed,and the variable must be set ETH_DCB_PFC_SUPPORT. */
+	    is needed, and the variable must be set to RTE_ETH_DCB_PFC_SUPPORT. */
 	uint32_t dcb_capability_en;
-	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
-	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+	struct rte_eth_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
+	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
 };
 
 /**
  * Rx offload capabilities of a device.
  */
-#define DEV_RX_OFFLOAD_VLAN_STRIP  0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT	0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER	0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND	0x00000400
-#define DEV_RX_OFFLOAD_SCATTER		0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP           RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM           RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM            RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM            RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO          0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO              RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP           RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP         RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT     0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT         RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER          RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND          RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER          0x00002000
+#define DEV_RX_OFFLOAD_SCATTER              RTE_ETH_RX_OFFLOAD_SCATTER
 /**
  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_RX_OFFLOAD_TIMESTAMP	0x00004000
-#define DEV_RX_OFFLOAD_SECURITY         0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC		0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM	0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH		0x00080000
-#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
-
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
-				 DEV_RX_OFFLOAD_UDP_CKSUM | \
-				 DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
-			     DEV_RX_OFFLOAD_VLAN_FILTER | \
-			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-			     DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP        0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP            RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY         0x00008000
+#define DEV_RX_OFFLOAD_SECURITY             RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC         0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC             RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM           RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM      RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH         0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH             RTE_ETH_RX_OFFLOAD_RSS_HASH
+#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT     0x00100000
+
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM	RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN	RTE_ETH_RX_OFFLOAD_VLAN
 
 /*
  * If new Rx offload capabilities are defined, they also must be
@@ -1396,54 +1594,76 @@ struct rte_eth_conf {
 /**
  * Tx offload capabilities of a device.
  */
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM  0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM   0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM   0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM  0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO     0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO     0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT          RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM           RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM            RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM            RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM           RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO          0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO              RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO          0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO              RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT          RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO        RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO          RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO         RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT        RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
 /**
  * Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
  * Tx queue without SW lock.
  */
-#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE          RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
 /** Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MULTI_SEGS	0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS           RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 /**
  * Device supports optimization for fast release of mbufs.
  * When set application must guarantee that per-queue all mbufs comes from
  * the same mempool and has refcnt = 1.
  */
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE	0x00010000
-#define DEV_TX_OFFLOAD_SECURITY         0x00020000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY         0x00020000
+#define DEV_TX_OFFLOAD_SECURITY             RTE_ETH_TX_OFFLOAD_SECURITY
 /**
  * Device supports generic UDP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO          RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
 /**
  * Device supports generic IP tunneled packet TSO.
  * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
  * for tunnel TSO.
  */
-#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO           RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
 /** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM      RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
 /**
  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
  * The mbuf field and flag are registered when the offload is configured.
  */
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP     RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
@@ -1591,7 +1811,7 @@ struct rte_eth_dev_info {
 	uint16_t vmdq_pool_base;  /**< First ID of VMDq pools. */
 	struct rte_eth_desc_lim rx_desc_lim;  /**< Rx descriptors limits */
 	struct rte_eth_desc_lim tx_desc_lim;  /**< Tx descriptors limits */
-	uint32_t speed_capa;  /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
 	/** Configured number of Rx/Tx queues */
 	uint16_t nb_rx_queues; /**< Number of Rx queues. */
 	uint16_t nb_tx_queues; /**< Number of Tx queues. */
@@ -1695,8 +1915,10 @@ struct rte_eth_xstat_name {
 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
 };
 
-#define ETH_DCB_NUM_TCS    8
-#define ETH_MAX_VMDQ_POOL  64
+#define RTE_ETH_DCB_NUM_TCS    8
+#define ETH_DCB_NUM_TCS        RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL  64
+#define ETH_MAX_VMDQ_POOL      RTE_ETH_MAX_VMDQ_POOL
 
 /**
  * A structure used to get the information of queue and
@@ -1707,12 +1929,12 @@ struct rte_eth_dcb_tc_queue_mapping {
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 	/** Rx queues assigned to tc per Pool */
 	struct {
 		uint16_t base;
 		uint16_t nb_queue;
-	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
 };
 
 /**
@@ -1721,8 +1943,8 @@ struct rte_eth_dcb_tc_queue_mapping {
  */
 struct rte_eth_dcb_info {
 	uint8_t nb_tcs;        /**< number of TCs */
-	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
-	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
+	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
+	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
 	/** Rx queues assigned to tc */
 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
 };
@@ -1746,7 +1968,7 @@ enum rte_eth_fec_mode {
 
 /* A structure used to get capabilities per link speed */
 struct rte_eth_fec_capa {
-	uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
+	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
 	uint32_t capa;  /**< FEC capabilities bitmask */
 };
 
@@ -2075,14 +2297,14 @@ uint16_t rte_eth_dev_count_total(void);
  * @param speed
  *   Numerical speed value in Mbps
  * @param duplex
- *   ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
+ *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
  * @return
  *   0 if the speed cannot be mapped
  */
 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 
 /**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2092,7 +2314,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
 
 /**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
  *
  * @param offload
  *   Offload flag.
@@ -2200,7 +2422,7 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
- *   the DEV_RX_OFFLOAD_* flags.
+ *   the RTE_ETH_RX_OFFLOAD_* flags.
  *   If an offloading set in rx_conf->offloads
  *   hasn't been set in the input argument eth_conf->rxmode.offloads
  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
@@ -2777,7 +2999,7 @@ const char *rte_eth_link_speed_to_str(uint32_t link_speed);
  *
  * @param str
  *   A pointer to a string to be filled with textual representation of
- *   device status. At least ETH_LINK_MAX_STR_LEN bytes should be allocated to
+ *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
  *   store default link status text.
  * @param len
  *   Length of available memory at 'str' string.
@@ -3323,10 +3545,10 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
  *   The port identifier of the Ethernet device.
  * @param offload_mask
  *   The VLAN Offload bit mask can be mixed use with "OR"
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  * @return
  *   - (0) if successful.
  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
@@ -3342,10 +3564,10 @@ int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
  *   The port identifier of the Ethernet device.
  * @return
  *   - (>0) if successful. Bit mask to indicate
- *       ETH_VLAN_STRIP_OFFLOAD
- *       ETH_VLAN_FILTER_OFFLOAD
- *       ETH_VLAN_EXTEND_OFFLOAD
- *       ETH_QINQ_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_STRIP_OFFLOAD
+ *       RTE_ETH_VLAN_FILTER_OFFLOAD
+ *       RTE_ETH_VLAN_EXTEND_OFFLOAD
+ *       RTE_ETH_QINQ_STRIP_OFFLOAD
  *   - (-ENODEV) if *port_id* invalid.
  */
 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
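
Typical read-modify-write usage of the two functions above, sketched with the renamed bits:

	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;	/* -ENODEV */
	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
	rte_eth_dev_set_vlan_offload(port_id, mask);
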
@@ -5371,7 +5593,7 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
  * of those packets whose transmission was effectively completed.
  *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
  * invoke this function concurrently on the same Tx queue without SW lock.
  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
  *
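A sketch of the capability check implied above, done before sharing one Tx queue between lcores:

	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE) {
		/* rte_eth_tx_burst() may be called on one queue from
		 * several threads without an application-side lock. */
	}
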
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index db3392bf9759..59d9d9eeb63f 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -2957,7 +2957,7 @@ struct rte_flow_action_rss {
 	 * through.
 	 */
 	uint32_t level;
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
 	uint32_t key_len; /**< Hash key length in bytes. */
 	uint32_t queue_num; /**< Number of entries in @p queue. */
 	const uint8_t *key; /**< Hash key. */
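
For illustration, an RSS action using the renamed hash type flags (the queues array and nb_queues count are assumed to exist):

	struct rte_flow_action_rss rss = {
		.level = 0,	/* outermost encapsulation */
		.types = RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_CHKSUM,
		.queue_num = nb_queues,
		.queue = queues,
	};
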
diff --git a/lib/gso/rte_gso.c b/lib/gso/rte_gso.c
index 0d02ec3cee05..119fdcac0b7f 100644
--- a/lib/gso/rte_gso.c
+++ b/lib/gso/rte_gso.c
@@ -15,13 +15,13 @@
 #include "gso_udp4.h"
 
 #define ILLEGAL_UDP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+	((((ctx)->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0) || \
 	 (ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
 
 #define ILLEGAL_TCP_GSO_CTX(ctx) \
-	((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
-		DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
-		DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+	((((ctx)->gso_types & (RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
 		(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
 
 int
@@ -54,28 +54,28 @@ rte_gso_segment(struct rte_mbuf *pkt,
 	ol_flags = pkt->ol_flags;
 
 	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
 			((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
-			 (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+			 (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_tunnel_udp4_segment(pkt, gso_size,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
 		ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
 	} else if (IS_IPV4_UDP(pkt->ol_flags) &&
-			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+			(gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
 		pkt->ol_flags &= (~PKT_TX_UDP_SEG);
 		ret = gso_udp4_segment(pkt, gso_size, direct_pool,
 				indirect_pool, pkts_out, nb_pkts_out);
diff --git a/lib/gso/rte_gso.h b/lib/gso/rte_gso.h
index d93ee8e5b171..0a65afc11e64 100644
--- a/lib/gso/rte_gso.h
+++ b/lib/gso/rte_gso.h
@@ -52,11 +52,11 @@ struct rte_gso_ctx {
 	uint32_t gso_types;
 	/**< the bit mask of required GSO types. The GSO library
 	 * uses the same macros as that of describing device TX
-	 * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+	 * offloading capabilities (i.e. RTE_ETH_TX_OFFLOAD_*_TSO) for
 	 * gso_types.
 	 *
 	 * For example, if applications want to segment TCP/IPv4
-	 * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+	 * packets, set RTE_ETH_TX_OFFLOAD_TCP_TSO in gso_types.
 	 */
 	uint16_t gso_size;
 	/**< maximum size of an output GSO segment, including packet
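
A minimal GSO context sketch matching the description above (the mbuf pool and the segment size are assumptions):

	struct rte_gso_ctx gso_ctx = {
		.direct_pool = pool,	/* assumed existing mbuf pool */
		.indirect_pool = pool,
		.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO, /* plain TCP/IPv4 GSO */
		.gso_size = 1400,
	};
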
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index fdaaaf67f2f3..57e871201816 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -185,7 +185,7 @@ extern "C" {
  * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
  * HW capability, At minimum, the PMD should support
  * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
- * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ * if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
  */
 #define PKT_RX_OUTER_L4_CKSUM_MASK	((1ULL << 21) | (1ULL << 22))
 
@@ -208,7 +208,7 @@ extern "C" {
  * a) Fill outer_l2_len and outer_l3_len in mbuf.
  * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
  * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
- * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * 2) Configure RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
  */
 #define PKT_TX_OUTER_UDP_CKSUM     (1ULL << 41)
 
@@ -254,7 +254,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
  * or PKT_TX_TUNNEL_IPIP if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
@@ -267,7 +267,7 @@ extern "C" {
  * It can be used for tunnels which are not standards or listed above.
  * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
  * if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
  * Outer and inner checksums are done according to the existing flags like
  * PKT_TX_xxx_CKSUM.
  * Specific tunnel headers that contain payload length, sequence id
diff --git a/lib/mbuf/rte_mbuf_dyn.h b/lib/mbuf/rte_mbuf_dyn.h
index fb03cf1dcf90..29abe8da53cf 100644
--- a/lib/mbuf/rte_mbuf_dyn.h
+++ b/lib/mbuf/rte_mbuf_dyn.h
@@ -37,7 +37,7 @@
  *   of the dynamic field to be registered:
  *   const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... };
  * - The application initializes the PMD, and asks for this feature
- *   at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in
+ *   at port initialization by passing RTE_ETH_RX_OFFLOAD_MY_FEATURE in
  *   rxconf. This will make the PMD to register the field by calling
  *   rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD
  *   stores the returned offset.
-- 
2.31.1
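
As a reference for the renamed flags above, here is a minimal sketch of
how an application might drive the GSO API with the new names (not part
of the patch; the two mempools and the input mbuf "pkt" are assumed to
be set up elsewhere):

	struct rte_mbuf *gso_segs[8];
	struct rte_gso_ctx gso_ctx = {
		.direct_pool = direct_pool,     /* assumed: mempool for segment headers */
		.indirect_pool = indirect_pool, /* assumed: mempool for indirect mbufs */
		.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO,
		.gso_size = 1400,               /* max size of each output segment */
	};
	int ret;

	/* "pkt" must carry PKT_TX_TCP_SEG and valid l2/l3/l4 lengths,
	 * otherwise rte_gso_segment() will not segment it. */
	ret = rte_gso_segment(pkt, &gso_ctx, gso_segs, RTE_DIM(gso_segs));
	/* ret is the number of segments in gso_segs, or negative on error. */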

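Similarly, the outer UDP checksum steps documented in the
rte_mbuf_core.h hunk translate to roughly the following per-packet
setup (a sketch assuming an IPv4/UDP/VXLAN encapsulation, with the port
configured with RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM in txmode.offloads):

	/* a) fill the outer header lengths in the mbuf */
	m->outer_l2_len = sizeof(struct rte_ether_hdr);
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
	/* b) + c) request outer UDP checksum and flag the outer L3 type */
	m->ol_flags |= PKT_TX_OUTER_UDP_CKSUM | PKT_TX_OUTER_IPV4;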

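And the dynamic field flow described in the rte_mbuf_dyn.h hunk looks
roughly as follows from the PMD side (a sketch; the field descriptor and
the RTE_ETH_RX_OFFLOAD_MY_FEATURE flag are the hypothetical names used
in the comment, and "rx_offloads" stands for the offloads requested at
configure time):

	static int my_feature_offset = -1;

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_MY_FEATURE) {
		my_feature_offset =
			rte_mbuf_dynfield_register(&rte_dynfield_my_feature);
		if (my_feature_offset < 0)
			return -rte_errno;	/* registration failed */
	}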

* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-10-22 11:03           ` [dpdk-dev] [PATCH v7] " Ferruh Yigit
@ 2021-10-22 11:28             ` Andrew Rybchenko
  2021-10-22 12:29               ` Somnath Kotur
  2021-10-22 13:02               ` Ferruh Yigit
  2021-11-01  9:23             ` Jiawen Wu
  1 sibling, 2 replies; 32+ messages in thread
From: Andrew Rybchenko @ 2021-10-22 11:28 UTC (permalink / raw)
  To: Ferruh Yigit, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, Somnath Kotur,
	John Daley, Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing,
	Haiyue Wang, Matan Azrad, Viacheslav Ovsiienko, Keith Wiles,
	Jiayu Hu, Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty,
	Ray Kinsella, Radu Nicolau, Hemant Agrawal, Sachin Saxena,
	Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	John W. Linville, Ciara Loftus, Shepard Siegel, Ed Czeck,
	John Miller, Igor Russkikh, Steven Webster, Matt Peters,
	Chandubabu Namburu, Rasesh Mody, Shahed Shaikh, Bruce Richardson,
	Konstantin Ananyev, Ruifeng Wang, Rahul Lakkireddy,
	Marcin Wojtas, Michal Krawczyk, Shai Brandes, Evgeny Schemeilin,
	Igor Chauskin, Gagandeep Singh, Gaetan Rivet, Ziyang Xuan,
	Xiaoyun Wang, Guoyang Zhou, Yisen Zhuang, Lijun Ou, Jingjing Wu,
	Qiming Yang, Andrew Boyer, Rosen Xu,
	Srisivasubramanian Srinivasan, Jakub Grajciar, Zyta Szpak,
	Liron Himi, Stephen Hemminger, Long Li, Martin Spinler,
	Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa, Harman Kalra,
	Anoob Joseph, Nalla Pradeep, Radha Mohan Chintakuntla,
	Veerasenareddy Burru, Devendra Singh Rawat, Jasvinder Singh,
	Maciej Czekaj, Jian Wang, Maxime Coquelin, Chenbo Xia, Yong Wang,
	Nicolas Chautru, David Hunt, Harry van Haaren, Bernard Iremonger,
	Anatoly Burakov, John McNamara, Kirill Rybalchenko, Byron Marohn,
	Yipeng Wang
  Cc: dev, Tyler Retzlaff, David Marchand

On 10/22/21 2:03 PM, Ferruh Yigit wrote:
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible

LGTM except two lines duplicated above.

> way. The macros for backward compatibility can be removed in next LTS.
> Also updated some struct names to have 'rte_eth' prefix.
> 
> All internal components switched to using new names.
> 
> Syntax fixed on lines that this patch touches.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> Acked-by: Jerin Jacob <jerinj@marvell.com>
> Acked-by: Wisam Jaddo <wisamm@nvidia.com>
> Acked-by: Rosen Xu <rosen.xu@intel.com>
> Acked-by: Chenbo Xia <chenbo.xia@intel.com>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>



* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-10-22 11:28             ` Andrew Rybchenko
@ 2021-10-22 12:29               ` Somnath Kotur
  2021-10-22 16:26                 ` Ferruh Yigit
  2021-10-22 13:02               ` Ferruh Yigit
  1 sibling, 1 reply; 32+ messages in thread
From: Somnath Kotur @ 2021-10-22 12:29 UTC (permalink / raw)
  To: Andrew Rybchenko
  Cc: Ferruh Yigit, Maryam Tahhan, Reshma Pattan, Jerin Jacob,
	Wisam Jaddo, Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, John Daley,
	Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing, Haiyue Wang,
	Matan Azrad, Viacheslav Ovsiienko, Keith Wiles, Jiayu Hu,
	Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty, Ray Kinsella,
	Radu Nicolau, Hemant Agrawal, Sachin Saxena, Nithin Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao, John W. Linville,
	Ciara Loftus, Shepard Siegel, Ed Czeck, John Miller,
	Igor Russkikh, Steven Webster, Matt Peters, Chandubabu Namburu,
	Rasesh Mody, Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang,
	dev, Tyler Retzlaff, David Marchand


On Fri, 22 Oct 2021, 16:58 Andrew Rybchenko, <andrew.rybchenko@oktetlabs.ru>
wrote:

> On 10/22/21 2:03 PM, Ferruh Yigit wrote:
> > Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> > Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
>
> LGTM except two lines duplicated above.
>
> > way. The macros for backward compatibility can be removed in next LTS.
> > Also updated some struct names to have 'rte_eth' prefix.
> >
> > All internal components switched to using new names.
> >
> > Syntax fixed on lines that this patch touches.
> >
> > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> > Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> > Acked-by: Jerin Jacob <jerinj@marvell.com>
> > Acked-by: Wisam Jaddo <wisamm@nvidia.com>
> > Acked-by: Rosen Xu <rosen.xu@intel.com>
> > Acked-by: Chenbo Xia <chenbo.xia@intel.com>
> > Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>



* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-10-22 11:28             ` Andrew Rybchenko
  2021-10-22 12:29               ` Somnath Kotur
@ 2021-10-22 13:02               ` Ferruh Yigit
  1 sibling, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-10-22 13:02 UTC (permalink / raw)
  To: Andrew Rybchenko; +Cc: dev

On 10/22/2021 12:28 PM, Andrew Rybchenko wrote:
> On 10/22/21 2:03 PM, Ferruh Yigit wrote:
>> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
>> Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> 
> LGTM except two lines duplicated above.
> 

Thanks Andrew, I will fix it while merging (unless there is a new version).

>> way. The macros for backward compatibility can be removed in next LTS.
>> Also updated some struct names to have 'rte_eth' prefix.
>>
>> All internal components switched to using new names.
>>
>> Syntax fixed on lines that this patch touches.
>>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
>> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
>> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
>> Acked-by: Jerin Jacob <jerinj@marvell.com>
>> Acked-by: Wisam Jaddo <wisamm@nvidia.com>
>> Acked-by: Rosen Xu <rosen.xu@intel.com>
>> Acked-by: Chenbo Xia <chenbo.xia@intel.com>
>> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> 



* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-10-22 12:29               ` Somnath Kotur
@ 2021-10-22 16:26                 ` Ferruh Yigit
  0 siblings, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-10-22 16:26 UTC (permalink / raw)
  To: Somnath Kotur, Andrew Rybchenko
  Cc: Maryam Tahhan, Reshma Pattan, Jerin Jacob, Wisam Jaddo,
	Cristian Dumitrescu, Xiaoyun Li, Thomas Monjalon,
	Jay Jayatheerthan, Chas Williams, Min Hu (Connor),
	Pavan Nikhilesh, Shijith Thotton, Ajit Khaparde, John Daley,
	Hyong Youb Kim, Qi Zhang, Xiao Wang, Beilei Xing, Haiyue Wang,
	Matan Azrad, Viacheslav Ovsiienko, Keith Wiles, Jiayu Hu,
	Olivier Matz, Ori Kam, Akhil Goyal, Declan Doherty, Ray Kinsella,
	Radu Nicolau, Hemant Agrawal, Sachin Saxena, Nithin Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao, John W. Linville,
	Ciara Loftus, Shepard Siegel, Ed Czeck, John Miller,
	Igor Russkikh, Steven Webster, Matt Peters, Chandubabu Namburu,
	Rasesh Mody, Shahed Shaikh, Bruce Richardson, Konstantin Ananyev,
	Ruifeng Wang, Rahul Lakkireddy, Marcin Wojtas, Michal Krawczyk,
	Shai Brandes, Evgeny Schemeilin, Igor Chauskin, Gagandeep Singh,
	Gaetan Rivet, Ziyang Xuan, Xiaoyun Wang, Guoyang Zhou,
	Yisen Zhuang, Lijun Ou, Jingjing Wu, Qiming Yang, Andrew Boyer,
	Rosen Xu, Srisivasubramanian Srinivasan, Jakub Grajciar,
	Zyta Szpak, Liron Himi, Stephen Hemminger, Long Li,
	Martin Spinler, Heinrich Kuhn, Jiawen Wu, Tetsuya Mukawa,
	Harman Kalra, Anoob Joseph, Nalla Pradeep,
	Radha Mohan Chintakuntla, Veerasenareddy Burru,
	Devendra Singh Rawat, Jasvinder Singh, Maciej Czekaj, Jian Wang,
	Maxime Coquelin, Chenbo Xia, Yong Wang, Nicolas Chautru,
	David Hunt, Harry van Haaren, Bernard Iremonger, Anatoly Burakov,
	John McNamara, Kirill Rybalchenko, Byron Marohn, Yipeng Wang,
	dev, Tyler Retzlaff, David Marchand


> On Fri, 22 Oct 2021, 16:58 Andrew Rybchenko, <andrew.rybchenko@oktetlabs.ru> wrote:
> 
>     On 10/22/21 2:03 PM, Ferruh Yigit wrote:
>      > Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
>      > Add 'RTE_ETH' namespace to all enums & macros in a backward compatible
> 
>     LGTM except two lines duplicated above.
> 
>      > way. The macros for backward compatibility can be removed in next LTS.
>      > Also updated some struct names to have 'rte_eth' prefix.
>      >
>      > All internal components switched to using new names.
>      >
>      > Syntax fixed on lines that this patch touches.
>      >
>      > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>      > Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
>      > Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
>      > Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
>      > Acked-by: Jerin Jacob <jerinj@marvell.com>
>      > Acked-by: Wisam Jaddo <wisamm@nvidia.com>
>      > Acked-by: Rosen Xu <rosen.xu@intel.com>
>      > Acked-by: Chenbo Xia <chenbo.xia@intel.com>
>      > Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> 
> Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
> 

Applied to dpdk-next-net/main, thanks.


* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-10-22 11:03           ` [dpdk-dev] [PATCH v7] " Ferruh Yigit
  2021-10-22 11:28             ` Andrew Rybchenko
@ 2021-11-01  9:23             ` Jiawen Wu
  2021-11-01 12:39               ` Ferruh Yigit
  1 sibling, 1 reply; 32+ messages in thread
From: Jiawen Wu @ 2021-11-01  9:23 UTC (permalink / raw)
  To: 'Ferruh Yigit'; +Cc: dev

On October 22, 2021 7:03 PM, Ferruh Yigit wrote:
> 
> diff --git a/drivers/net/txgbe/txgbe_ethdev.c
> b/drivers/net/txgbe/txgbe_ethdev.c
> index 7b46ffb68635..0b0f9db7cb2a 100644
> --- a/drivers/net/txgbe/txgbe_ethdev.c
> +++ b/drivers/net/txgbe/txgbe_ethdev.c
> @@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev
> *dev,
>  	int wait = 1;
> 
>  	memset(&link, 0, sizeof(link));
> -	link.link_status = ETH_LINK_DOWN;
> -	link.link_speed = ETH_SPEED_NUM_NONE;
> -	link.link_duplex = ETH_LINK_HALF_DUPLEX;
> +	link.link_status = RTE_ETH_LINK_DOWN;
> +	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
> +	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
>  	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
> -			ETH_LINK_SPEED_FIXED);
> +			RTE_ETH_LINK_AUTONEG);
> 

Hi Ferruh,
Is there something wrong here, in that 'ETH_LINK_SPEED_FIXED' was
changed to auto-negotiation?

>  	hw->mac.get_link_status = true;
> 





* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-11-01  9:23             ` Jiawen Wu
@ 2021-11-01 12:39               ` Ferruh Yigit
  2021-11-01 12:48                 ` Ferruh Yigit
  0 siblings, 1 reply; 32+ messages in thread
From: Ferruh Yigit @ 2021-11-01 12:39 UTC (permalink / raw)
  To: Jiawen Wu; +Cc: dev

On 11/1/2021 9:23 AM, Jiawen Wu wrote:
> On October 22, 2021 7:03 PM, Ferruh Yigit wrote:
>>
>> diff --git a/drivers/net/txgbe/txgbe_ethdev.c
>> b/drivers/net/txgbe/txgbe_ethdev.c
>> index 7b46ffb68635..0b0f9db7cb2a 100644
>> --- a/drivers/net/txgbe/txgbe_ethdev.c
>> +++ b/drivers/net/txgbe/txgbe_ethdev.c
>> @@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev
>> *dev,
>>   	int wait = 1;
>>
>>   	memset(&link, 0, sizeof(link));
>> -	link.link_status = ETH_LINK_DOWN;
>> -	link.link_speed = ETH_SPEED_NUM_NONE;
>> -	link.link_duplex = ETH_LINK_HALF_DUPLEX;
>> +	link.link_status = RTE_ETH_LINK_DOWN;
>> +	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
>> +	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
>>   	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
>> -			ETH_LINK_SPEED_FIXED);
>> +			RTE_ETH_LINK_AUTONEG);
>>
> 
> Hi Ferruh,
> Is there something wrong here, in that 'ETH_LINK_SPEED_FIXED' was
> changed to auto-negotiation?
> 

Not sure how it happened, but I will send a patch to fix it.


* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-11-01 12:39               ` Ferruh Yigit
@ 2021-11-01 12:48                 ` Ferruh Yigit
  2021-11-01 13:20                   ` Ferruh Yigit
  0 siblings, 1 reply; 32+ messages in thread
From: Ferruh Yigit @ 2021-11-01 12:48 UTC (permalink / raw)
  To: Jiawen Wu; +Cc: dev

On 11/1/2021 12:39 PM, Ferruh Yigit wrote:
> On 11/1/2021 9:23 AM, Jiawen Wu wrote:
>> On October 22, 2021 7:03 PM, Ferruh Yigit wrote:
>>>
>>> diff --git a/drivers/net/txgbe/txgbe_ethdev.c
>>> b/drivers/net/txgbe/txgbe_ethdev.c
>>> index 7b46ffb68635..0b0f9db7cb2a 100644
>>> --- a/drivers/net/txgbe/txgbe_ethdev.c
>>> +++ b/drivers/net/txgbe/txgbe_ethdev.c
>>> @@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev
>>> *dev,
>>>       int wait = 1;
>>>
>>>       memset(&link, 0, sizeof(link));
>>> -    link.link_status = ETH_LINK_DOWN;
>>> -    link.link_speed = ETH_SPEED_NUM_NONE;
>>> -    link.link_duplex = ETH_LINK_HALF_DUPLEX;
>>> +    link.link_status = RTE_ETH_LINK_DOWN;
>>> +    link.link_speed = RTE_ETH_SPEED_NUM_NONE;
>>> +    link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
>>>       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
>>> -            ETH_LINK_SPEED_FIXED);
>>> +            RTE_ETH_LINK_AUTONEG);
>>>
>>
>> Hi Ferruh,
>> Is there something wrong here, in that 'ETH_LINK_SPEED_FIXED' was
>> changed to auto-negotiation?
>>
> 
> Not sure how it happened, but I will send a patch to fix it.

Ahh, there is another driver change [1] between versions of this patch
that seems to have led to the conflict-resolution error.

[1]
Commit 196f0e123bcb ("net/txgbe: set fixed flag for exact link speed")

  -       link.link_autoneg = ETH_LINK_AUTONEG;
  +       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
  +                       ETH_LINK_SPEED_FIXED);
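
Presumably the fix needs to restore the fixed-speed test under its new
name, i.e. roughly:

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			RTE_ETH_LINK_SPEED_FIXED);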


* Re: [dpdk-dev] [PATCH v7] ethdev: add namespace
  2021-11-01 12:48                 ` Ferruh Yigit
@ 2021-11-01 13:20                   ` Ferruh Yigit
  0 siblings, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2021-11-01 13:20 UTC (permalink / raw)
  To: Jiawen Wu; +Cc: dev

On 11/1/2021 12:48 PM, Ferruh Yigit wrote:
> On 11/1/2021 12:39 PM, Ferruh Yigit wrote:
>> On 11/1/2021 9:23 AM, Jiawen Wu wrote:
>>> On October 22, 2021 7:03 PM, Ferruh Yigit wrote:
>>>>
>>>> diff --git a/drivers/net/txgbe/txgbe_ethdev.c
>>>> b/drivers/net/txgbe/txgbe_ethdev.c
>>>> index 7b46ffb68635..0b0f9db7cb2a 100644
>>>> --- a/drivers/net/txgbe/txgbe_ethdev.c
>>>> +++ b/drivers/net/txgbe/txgbe_ethdev.c
>>>> @@ -2695,11 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev
>>>> *dev,
>>>>       int wait = 1;
>>>>
>>>>       memset(&link, 0, sizeof(link));
>>>> -    link.link_status = ETH_LINK_DOWN;
>>>> -    link.link_speed = ETH_SPEED_NUM_NONE;
>>>> -    link.link_duplex = ETH_LINK_HALF_DUPLEX;
>>>> +    link.link_status = RTE_ETH_LINK_DOWN;
>>>> +    link.link_speed = RTE_ETH_SPEED_NUM_NONE;
>>>> +    link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
>>>>       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
>>>> -            ETH_LINK_SPEED_FIXED);
>>>> +            RTE_ETH_LINK_AUTONEG);
>>>>
>>>
>>> Hi Ferruh,
>>> Is there something wrong here, in that 'ETH_LINK_SPEED_FIXED' was
>>> changed to auto-negotiation?
>>>
>>
>> Not sure how it happened, but I will send a patch to fix it.
> 
> Ahh, there is another driver change [1] between versions of this patch
> that seems to have led to the conflict-resolution error.
> 
> [1]
> Commit 196f0e123bcb ("net/txgbe: set fixed flag for exact link speed")
> 
>   -       link.link_autoneg = ETH_LINK_AUTONEG;
>   +       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
>   +                       ETH_LINK_SPEED_FIXED);


Can you please check:
https://patches.dpdk.org/project/dpdk/patch/20211101131932.3398023-1-ferruh.yigit@intel.com/


end of thread

Thread overview: 32+ messages
2021-06-29 13:46 [dpdk-dev] [PATCH] ethdev: add namespace Ferruh Yigit
2021-06-29 15:02 ` Tyler Retzlaff
2021-06-30  6:29 ` David Marchand
2021-06-30  9:08   ` Ferruh Yigit
2021-08-27  1:19 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
2021-08-27  7:59   ` Andrew Rybchenko
2021-08-27 20:24     ` Ferruh Yigit
2021-08-28 14:26     ` Ajit Khaparde
2021-08-29  7:47       ` Jerin Jacob
2021-08-29  8:17       ` Wisam Monther
2021-08-30  2:13         ` Xu, Rosen
2021-08-30  5:27           ` Xia, Chenbo
2021-08-30  6:24         ` Hemant Agrawal
2021-08-30  9:41   ` David Marchand
2021-08-30 17:01     ` Ferruh Yigit
2021-08-30 17:19   ` [dpdk-dev] [PATCH v3] " Ferruh Yigit
2021-08-31  7:59     ` Thomas Monjalon
2021-10-18 15:43     ` [dpdk-dev] [PATCH v4] " Ferruh Yigit
2021-10-20 19:23       ` [dpdk-dev] [PATCH v5] " Ferruh Yigit
2021-10-22  2:02         ` [dpdk-dev] [PATCH v6] " Ferruh Yigit
2021-10-22  6:44           ` Andrew Rybchenko
2021-10-22  8:25             ` Ferruh Yigit
2021-10-22  9:48           ` Pattan, Reshma
2021-10-22 11:03           ` [dpdk-dev] [PATCH v7] " Ferruh Yigit
2021-10-22 11:28             ` Andrew Rybchenko
2021-10-22 12:29               ` Somnath Kotur
2021-10-22 16:26                 ` Ferruh Yigit
2021-10-22 13:02               ` Ferruh Yigit
2021-11-01  9:23             ` Jiawen Wu
2021-11-01 12:39               ` Ferruh Yigit
2021-11-01 12:48                 ` Ferruh Yigit
2021-11-01 13:20                   ` Ferruh Yigit
